ARMAsmParser.cpp revision b1d081230e40e5c86f3cc44a7cfd7241732eabfb
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "llvm/MC/MCTargetAsmParser.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMBaseInfo.h"
13#include "MCTargetDesc/ARMMCExpr.h"
14#include "llvm/ADT/BitVector.h"
15#include "llvm/ADT/OwningPtr.h"
16#include "llvm/ADT/STLExtras.h"
17#include "llvm/ADT/SmallVector.h"
18#include "llvm/ADT/StringSwitch.h"
19#include "llvm/ADT/Twine.h"
20#include "llvm/MC/MCAsmInfo.h"
21#include "llvm/MC/MCAssembler.h"
22#include "llvm/MC/MCContext.h"
23#include "llvm/MC/MCELFStreamer.h"
24#include "llvm/MC/MCExpr.h"
25#include "llvm/MC/MCInst.h"
26#include "llvm/MC/MCInstrDesc.h"
27#include "llvm/MC/MCParser/MCAsmLexer.h"
28#include "llvm/MC/MCParser/MCAsmParser.h"
29#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
30#include "llvm/MC/MCRegisterInfo.h"
31#include "llvm/MC/MCStreamer.h"
32#include "llvm/MC/MCSubtargetInfo.h"
33#include "llvm/Support/ELF.h"
34#include "llvm/Support/MathExtras.h"
35#include "llvm/Support/SourceMgr.h"
36#include "llvm/Support/TargetRegistry.h"
37#include "llvm/Support/raw_ostream.h"
38
39using namespace llvm;
40
41namespace {
42
43class ARMOperand;
44
45enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
46
47class ARMAsmParser : public MCTargetAsmParser {
48  MCSubtargetInfo &STI;
49  MCAsmParser &Parser;
50  const MCRegisterInfo *MRI;
51
52  // Map of register aliases created via the .req directive.
53  StringMap<unsigned> RegisterReqs;
54
55  struct {
56    ARMCC::CondCodes Cond;    // Condition for IT block.
57    unsigned Mask:4;          // Condition mask for instructions.
58                              // Starting at first 1 (from lsb).
59                              //   '1'  condition as indicated in IT.
60                              //   '0'  inverse of condition (else).
61                              // Count of instructions in IT block is
62                              // 4 - trailingzeroes(mask)
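                              // For example (illustrative): a bare "IT"
                              // has Mask == 0b1000 (three trailing
                              // zeroes, one conditional instruction),
                              // while a four-instruction block has
                              // bit 0 set, giving 4 - 0 == 4.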
63
64    bool FirstCond;           // Explicit flag for when we're parsing the
65                              // First instruction in the IT block. It's
66                              // implied in the mask, so needs special
67                              // handling.
68
69    unsigned CurPosition;     // Current position in parsing of IT
70                              // block. In range [0,3]. Initialized
71                              // according to count of instructions in block.
72                              // ~0U if no active IT block.
73  } ITState;
74  bool inITBlock() { return ITState.CurPosition != ~0U;}
75  void forwardITPosition() {
76    if (!inITBlock()) return;
77    // Move to the next instruction in the IT block, if there is one. If not,
78    // mark the block as done.
79    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
80    if (++ITState.CurPosition == 5 - TZ)
81      ITState.CurPosition = ~0U; // Done with the IT block after this.
82  }
83
84
85  MCAsmParser &getParser() const { return Parser; }
86  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
87
88  bool Warning(SMLoc L, const Twine &Msg,
89               ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
90    return Parser.Warning(L, Msg, Ranges);
91  }
92  bool Error(SMLoc L, const Twine &Msg,
93             ArrayRef<SMRange> Ranges = ArrayRef<SMRange>()) {
94    return Parser.Error(L, Msg, Ranges);
95  }
96
97  int tryParseRegister();
98  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
99  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
100  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
101  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
102  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
103  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
104  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
105                              unsigned &ShiftAmount);
106  bool parseDirectiveWord(unsigned Size, SMLoc L);
107  bool parseDirectiveThumb(SMLoc L);
108  bool parseDirectiveARM(SMLoc L);
109  bool parseDirectiveThumbFunc(SMLoc L);
110  bool parseDirectiveCode(SMLoc L);
111  bool parseDirectiveSyntax(SMLoc L);
112  bool parseDirectiveReq(StringRef Name, SMLoc L);
113  bool parseDirectiveUnreq(SMLoc L);
114  bool parseDirectiveArch(SMLoc L);
115  bool parseDirectiveEabiAttr(SMLoc L);
116
117  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
118                          bool &CarrySetting, unsigned &ProcessorIMod,
119                          StringRef &ITMask);
120  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
121                             bool &CanAcceptPredicationCode);
122
123  bool isThumb() const {
124    // FIXME: Can tablegen auto-generate this?
125    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
126  }
127  bool isThumbOne() const {
128    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
129  }
130  bool isThumbTwo() const {
131    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
132  }
133  bool hasV6Ops() const {
134    return STI.getFeatureBits() & ARM::HasV6Ops;
135  }
136  bool hasV7Ops() const {
137    return STI.getFeatureBits() & ARM::HasV7Ops;
138  }
139  void SwitchMode() {
140    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
141    setAvailableFeatures(FB);
142  }
143  bool isMClass() const {
144    return STI.getFeatureBits() & ARM::FeatureMClass;
145  }
146
147  /// @name Auto-generated Match Functions
148  /// {
149
150#define GET_ASSEMBLER_HEADER
151#include "ARMGenAsmMatcher.inc"
152
153  /// }
154
155  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseCoprocNumOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parseCoprocRegOperand(
159    SmallVectorImpl<MCParsedAsmOperand*>&);
160  OperandMatchResultTy parseCoprocOptionOperand(
161    SmallVectorImpl<MCParsedAsmOperand*>&);
162  OperandMatchResultTy parseMemBarrierOptOperand(
163    SmallVectorImpl<MCParsedAsmOperand*>&);
164  OperandMatchResultTy parseProcIFlagsOperand(
165    SmallVectorImpl<MCParsedAsmOperand*>&);
166  OperandMatchResultTy parseMSRMaskOperand(
167    SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
169                                   StringRef Op, int Low, int High);
170  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
171    return parsePKHImm(O, "lsl", 0, 31);
172  }
173  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
174    return parsePKHImm(O, "asr", 1, 32);
175  }
176  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
177  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
178  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
179  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
180  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
181  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
182  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
183  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
184  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index,
185                                       SMLoc &EndLoc);
186
187  // Asm Match Converter Methods
188  void cvtT2LdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
189  void cvtT2StrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
190  void cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  void cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  void cvtLdWriteBackRegAddrMode2(MCInst &Inst,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  void cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
197                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
198  void cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
199                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
200  void cvtStWriteBackRegAddrMode2(MCInst &Inst,
201                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
202  void cvtStWriteBackRegAddrMode3(MCInst &Inst,
203                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
204  void cvtLdExtTWriteBackImm(MCInst &Inst,
205                             const SmallVectorImpl<MCParsedAsmOperand*> &);
206  void cvtLdExtTWriteBackReg(MCInst &Inst,
207                             const SmallVectorImpl<MCParsedAsmOperand*> &);
208  void cvtStExtTWriteBackImm(MCInst &Inst,
209                             const SmallVectorImpl<MCParsedAsmOperand*> &);
210  void cvtStExtTWriteBackReg(MCInst &Inst,
211                             const SmallVectorImpl<MCParsedAsmOperand*> &);
212  void cvtLdrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
213  void cvtStrdPre(MCInst &Inst, const SmallVectorImpl<MCParsedAsmOperand*> &);
214  void cvtLdWriteBackRegAddrMode3(MCInst &Inst,
215                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
216  void cvtThumbMultiply(MCInst &Inst,
217                        const SmallVectorImpl<MCParsedAsmOperand*> &);
218  void cvtVLDwbFixed(MCInst &Inst,
219                     const SmallVectorImpl<MCParsedAsmOperand*> &);
220  void cvtVLDwbRegister(MCInst &Inst,
221                        const SmallVectorImpl<MCParsedAsmOperand*> &);
222  void cvtVSTwbFixed(MCInst &Inst,
223                     const SmallVectorImpl<MCParsedAsmOperand*> &);
224  void cvtVSTwbRegister(MCInst &Inst,
225                        const SmallVectorImpl<MCParsedAsmOperand*> &);
226  bool validateInstruction(MCInst &Inst,
227                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
228  bool processInstruction(MCInst &Inst,
229                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
230  bool shouldOmitCCOutOperand(StringRef Mnemonic,
231                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
232
233public:
234  enum ARMMatchResultTy {
235    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
236    Match_RequiresNotITBlock,
237    Match_RequiresV6,
238    Match_RequiresThumb2,
239#define GET_OPERAND_DIAGNOSTIC_TYPES
240#include "ARMGenAsmMatcher.inc"
241
242  };
243
244  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
245    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
246    MCAsmParserExtension::Initialize(_Parser);
247
248    // Cache the MCRegisterInfo.
249    MRI = &getContext().getRegisterInfo();
250
251    // Initialize the set of available features.
252    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
253
254    // Not in an ITBlock to start with.
255    ITState.CurPosition = ~0U;
256
257    // Set ELF header flags.
258    // FIXME: This should eventually end up somewhere else where more
259    // intelligent flag decisions can be made. For now we are just maintaining
260    // the status quo for ARM and setting EF_ARM_EABI_VER5 as the default.
261    if (MCELFStreamer *MES = dyn_cast<MCELFStreamer>(&Parser.getStreamer()))
262      MES->getAssembler().setELFHeaderEFlags(ELF::EF_ARM_EABI_VER5);
263  }
264
265  // Implementation of the MCTargetAsmParser interface:
266  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
267  bool ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
268                        SMLoc NameLoc,
269                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
270  bool ParseDirective(AsmToken DirectiveID);
271
272  unsigned validateTargetOperandClass(MCParsedAsmOperand *Op, unsigned Kind);
273  unsigned checkTargetMatchPredicate(MCInst &Inst);
274
275  bool MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
276                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
277                               MCStreamer &Out, unsigned &ErrorInfo,
278                               bool MatchingInlineAsm);
279};
280} // end anonymous namespace
281
282namespace {
283
284/// ARMOperand - Instances of this class represent a parsed ARM machine
285/// operand.
286class ARMOperand : public MCParsedAsmOperand {
287  enum KindTy {
288    k_CondCode,
289    k_CCOut,
290    k_ITCondMask,
291    k_CoprocNum,
292    k_CoprocReg,
293    k_CoprocOption,
294    k_Immediate,
295    k_MemBarrierOpt,
296    k_Memory,
297    k_PostIndexRegister,
298    k_MSRMask,
299    k_ProcIFlags,
300    k_VectorIndex,
301    k_Register,
302    k_RegisterList,
303    k_DPRRegisterList,
304    k_SPRRegisterList,
305    k_VectorList,
306    k_VectorListAllLanes,
307    k_VectorListIndexed,
308    k_ShiftedRegister,
309    k_ShiftedImmediate,
310    k_ShifterImmediate,
311    k_RotateImmediate,
312    k_BitfieldDescriptor,
313    k_Token
314  } Kind;
315
316  SMLoc StartLoc, EndLoc;
317  SmallVector<unsigned, 8> Registers;
318
319  union {
320    struct {
321      ARMCC::CondCodes Val;
322    } CC;
323
324    struct {
325      unsigned Val;
326    } Cop;
327
328    struct {
329      unsigned Val;
330    } CoprocOption;
331
332    struct {
333      unsigned Mask:4;
334    } ITMask;
335
336    struct {
337      ARM_MB::MemBOpt Val;
338    } MBOpt;
339
340    struct {
341      ARM_PROC::IFlags Val;
342    } IFlags;
343
344    struct {
345      unsigned Val;
346    } MMask;
347
348    struct {
349      const char *Data;
350      unsigned Length;
351    } Tok;
352
353    struct {
354      unsigned RegNum;
355    } Reg;
356
357    // A vector register list is a sequential list of 1 to 4 registers.
358    struct {
359      unsigned RegNum;
360      unsigned Count;
361      unsigned LaneIndex;
362      bool isDoubleSpaced;
363    } VectorList;
364
365    struct {
366      unsigned Val;
367    } VectorIndex;
368
369    struct {
370      const MCExpr *Val;
371    } Imm;
372
373    /// Combined record for all forms of ARM address expressions.
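    /// For example (illustrative): "[r0, r1, lsl #2]" is represented with
    /// BaseRegNum == ARM::R0, OffsetRegNum == ARM::R1, ShiftType == lsl and
    /// ShiftImm == 2, with OffsetImm left null.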
374    struct {
375      unsigned BaseRegNum;
376      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
377      // was specified.
378      const MCConstantExpr *OffsetImm;  // Offset immediate value
379      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
380      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
381      unsigned ShiftImm;        // shift for OffsetReg.
382      unsigned Alignment;       // 0 = no alignment specified
383                                // n = alignment in bytes (2, 4, 8, 16, or 32)
384      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
385    } Memory;
386
387    struct {
388      unsigned RegNum;
389      bool isAdd;
390      ARM_AM::ShiftOpc ShiftTy;
391      unsigned ShiftImm;
392    } PostIdxReg;
393
394    struct {
395      bool isASR;
396      unsigned Imm;
397    } ShifterImm;
398    struct {
399      ARM_AM::ShiftOpc ShiftTy;
400      unsigned SrcReg;
401      unsigned ShiftReg;
402      unsigned ShiftImm;
403    } RegShiftedReg;
404    struct {
405      ARM_AM::ShiftOpc ShiftTy;
406      unsigned SrcReg;
407      unsigned ShiftImm;
408    } RegShiftedImm;
409    struct {
410      unsigned Imm;
411    } RotImm;
412    struct {
413      unsigned LSB;
414      unsigned Width;
415    } Bitfield;
416  };
417
418  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
419public:
420  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
421    Kind = o.Kind;
422    StartLoc = o.StartLoc;
423    EndLoc = o.EndLoc;
424    switch (Kind) {
425    case k_CondCode:
426      CC = o.CC;
427      break;
428    case k_ITCondMask:
429      ITMask = o.ITMask;
430      break;
431    case k_Token:
432      Tok = o.Tok;
433      break;
434    case k_CCOut:
435    case k_Register:
436      Reg = o.Reg;
437      break;
438    case k_RegisterList:
439    case k_DPRRegisterList:
440    case k_SPRRegisterList:
441      Registers = o.Registers;
442      break;
443    case k_VectorList:
444    case k_VectorListAllLanes:
445    case k_VectorListIndexed:
446      VectorList = o.VectorList;
447      break;
448    case k_CoprocNum:
449    case k_CoprocReg:
450      Cop = o.Cop;
451      break;
452    case k_CoprocOption:
453      CoprocOption = o.CoprocOption;
454      break;
455    case k_Immediate:
456      Imm = o.Imm;
457      break;
458    case k_MemBarrierOpt:
459      MBOpt = o.MBOpt;
460      break;
461    case k_Memory:
462      Memory = o.Memory;
463      break;
464    case k_PostIndexRegister:
465      PostIdxReg = o.PostIdxReg;
466      break;
467    case k_MSRMask:
468      MMask = o.MMask;
469      break;
470    case k_ProcIFlags:
471      IFlags = o.IFlags;
472      break;
473    case k_ShifterImmediate:
474      ShifterImm = o.ShifterImm;
475      break;
476    case k_ShiftedRegister:
477      RegShiftedReg = o.RegShiftedReg;
478      break;
479    case k_ShiftedImmediate:
480      RegShiftedImm = o.RegShiftedImm;
481      break;
482    case k_RotateImmediate:
483      RotImm = o.RotImm;
484      break;
485    case k_BitfieldDescriptor:
486      Bitfield = o.Bitfield;
487      break;
488    case k_VectorIndex:
489      VectorIndex = o.VectorIndex;
490      break;
491    }
492  }
493
494  /// getStartLoc - Get the location of the first token of this operand.
495  SMLoc getStartLoc() const { return StartLoc; }
496  /// getEndLoc - Get the location of the last token of this operand.
497  SMLoc getEndLoc() const { return EndLoc; }
498  /// getLocRange - Get the range between the first and last token of this
499  /// operand.
500  SMRange getLocRange() const { return SMRange(StartLoc, EndLoc); }
501
502  ARMCC::CondCodes getCondCode() const {
503    assert(Kind == k_CondCode && "Invalid access!");
504    return CC.Val;
505  }
506
507  unsigned getCoproc() const {
508    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
509    return Cop.Val;
510  }
511
512  StringRef getToken() const {
513    assert(Kind == k_Token && "Invalid access!");
514    return StringRef(Tok.Data, Tok.Length);
515  }
516
517  unsigned getReg() const {
518    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
519    return Reg.RegNum;
520  }
521
522  const SmallVectorImpl<unsigned> &getRegList() const {
523    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
524            Kind == k_SPRRegisterList) && "Invalid access!");
525    return Registers;
526  }
527
528  const MCExpr *getImm() const {
529    assert(isImm() && "Invalid access!");
530    return Imm.Val;
531  }
532
533  unsigned getVectorIndex() const {
534    assert(Kind == k_VectorIndex && "Invalid access!");
535    return VectorIndex.Val;
536  }
537
538  ARM_MB::MemBOpt getMemBarrierOpt() const {
539    assert(Kind == k_MemBarrierOpt && "Invalid access!");
540    return MBOpt.Val;
541  }
542
543  ARM_PROC::IFlags getProcIFlags() const {
544    assert(Kind == k_ProcIFlags && "Invalid access!");
545    return IFlags.Val;
546  }
547
548  unsigned getMSRMask() const {
549    assert(Kind == k_MSRMask && "Invalid access!");
550    return MMask.Val;
551  }
552
553  bool isCoprocNum() const { return Kind == k_CoprocNum; }
554  bool isCoprocReg() const { return Kind == k_CoprocReg; }
555  bool isCoprocOption() const { return Kind == k_CoprocOption; }
556  bool isCondCode() const { return Kind == k_CondCode; }
557  bool isCCOut() const { return Kind == k_CCOut; }
558  bool isITMask() const { return Kind == k_ITCondMask; }
559  bool isITCondCode() const { return Kind == k_CondCode; }
560  bool isImm() const { return Kind == k_Immediate; }
561  bool isFPImm() const {
562    if (!isImm()) return false;
563    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
564    if (!CE) return false;
565    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
566    return Val != -1;
567  }
568  bool isFBits16() const {
569    if (!isImm()) return false;
570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
571    if (!CE) return false;
572    int64_t Value = CE->getValue();
573    return Value >= 0 && Value <= 16;
574  }
575  bool isFBits32() const {
576    if (!isImm()) return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return Value >= 1 && Value <= 32;
581  }
582  bool isImm8s4() const {
583    if (!isImm()) return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = CE->getValue();
587    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
588  }
589  bool isImm0_1020s4() const {
590    if (!isImm()) return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
595  }
596  bool isImm0_508s4() const {
597    if (!isImm()) return false;
598    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
599    if (!CE) return false;
600    int64_t Value = CE->getValue();
601    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
602  }
603  bool isImm0_508s4Neg() const {
604    if (!isImm()) return false;
605    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
606    if (!CE) return false;
607    int64_t Value = -CE->getValue();
608    // Explicitly exclude zero; we want that to use the normal 0_508 version.
609    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
610  }
611  bool isImm0_255() const {
612    if (!isImm()) return false;
613    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
614    if (!CE) return false;
615    int64_t Value = CE->getValue();
616    return Value >= 0 && Value < 256;
617  }
618  bool isImm0_4095() const {
619    if (!isImm()) return false;
620    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
621    if (!CE) return false;
622    int64_t Value = CE->getValue();
623    return Value >= 0 && Value < 4096;
624  }
625  bool isImm0_4095Neg() const {
626    if (!isImm()) return false;
627    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
628    if (!CE) return false;
629    int64_t Value = -CE->getValue();
630    return Value > 0 && Value < 4096;
631  }
632  bool isImm0_1() const {
633    if (!isImm()) return false;
634    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
635    if (!CE) return false;
636    int64_t Value = CE->getValue();
637    return Value >= 0 && Value < 2;
638  }
639  bool isImm0_3() const {
640    if (!isImm()) return false;
641    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
642    if (!CE) return false;
643    int64_t Value = CE->getValue();
644    return Value >= 0 && Value < 4;
645  }
646  bool isImm0_7() const {
647    if (!isImm()) return false;
648    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
649    if (!CE) return false;
650    int64_t Value = CE->getValue();
651    return Value >= 0 && Value < 8;
652  }
653  bool isImm0_15() const {
654    if (!isImm()) return false;
655    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
656    if (!CE) return false;
657    int64_t Value = CE->getValue();
658    return Value >= 0 && Value < 16;
659  }
660  bool isImm0_31() const {
661    if (!isImm()) return false;
662    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
663    if (!CE) return false;
664    int64_t Value = CE->getValue();
665    return Value >= 0 && Value < 32;
666  }
667  bool isImm0_63() const {
668    if (!isImm()) return false;
669    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
670    if (!CE) return false;
671    int64_t Value = CE->getValue();
672    return Value >= 0 && Value < 64;
673  }
674  bool isImm8() const {
675    if (!isImm()) return false;
676    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
677    if (!CE) return false;
678    int64_t Value = CE->getValue();
679    return Value == 8;
680  }
681  bool isImm16() const {
682    if (!isImm()) return false;
683    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
684    if (!CE) return false;
685    int64_t Value = CE->getValue();
686    return Value == 16;
687  }
688  bool isImm32() const {
689    if (!isImm()) return false;
690    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
691    if (!CE) return false;
692    int64_t Value = CE->getValue();
693    return Value == 32;
694  }
695  bool isShrImm8() const {
696    if (!isImm()) return false;
697    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
698    if (!CE) return false;
699    int64_t Value = CE->getValue();
700    return Value > 0 && Value <= 8;
701  }
702  bool isShrImm16() const {
703    if (!isImm()) return false;
704    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
705    if (!CE) return false;
706    int64_t Value = CE->getValue();
707    return Value > 0 && Value <= 16;
708  }
709  bool isShrImm32() const {
710    if (!isImm()) return false;
711    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
712    if (!CE) return false;
713    int64_t Value = CE->getValue();
714    return Value > 0 && Value <= 32;
715  }
716  bool isShrImm64() const {
717    if (!isImm()) return false;
718    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
719    if (!CE) return false;
720    int64_t Value = CE->getValue();
721    return Value > 0 && Value <= 64;
722  }
723  bool isImm1_7() const {
724    if (!isImm()) return false;
725    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
726    if (!CE) return false;
727    int64_t Value = CE->getValue();
728    return Value > 0 && Value < 8;
729  }
730  bool isImm1_15() const {
731    if (!isImm()) return false;
732    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
733    if (!CE) return false;
734    int64_t Value = CE->getValue();
735    return Value > 0 && Value < 16;
736  }
737  bool isImm1_31() const {
738    if (!isImm()) return false;
739    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
740    if (!CE) return false;
741    int64_t Value = CE->getValue();
742    return Value > 0 && Value < 32;
743  }
744  bool isImm1_16() const {
745    if (!isImm()) return false;
746    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
747    if (!CE) return false;
748    int64_t Value = CE->getValue();
749    return Value > 0 && Value < 17;
750  }
751  bool isImm1_32() const {
752    if (!isImm()) return false;
753    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
754    if (!CE) return false;
755    int64_t Value = CE->getValue();
756    return Value > 0 && Value < 33;
757  }
758  bool isImm0_32() const {
759    if (!isImm()) return false;
760    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
761    if (!CE) return false;
762    int64_t Value = CE->getValue();
763    return Value >= 0 && Value < 33;
764  }
765  bool isImm0_65535() const {
766    if (!isImm()) return false;
767    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
768    if (!CE) return false;
769    int64_t Value = CE->getValue();
770    return Value >= 0 && Value < 65536;
771  }
772  bool isImm0_65535Expr() const {
773    if (!isImm()) return false;
774    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
775    // If it's not a constant expression, it'll generate a fixup and be
776    // handled later.
777    if (!CE) return true;
778    int64_t Value = CE->getValue();
779    return Value >= 0 && Value < 65536;
780  }
781  bool isImm24bit() const {
782    if (!isImm()) return false;
783    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
784    if (!CE) return false;
785    int64_t Value = CE->getValue();
786    return Value >= 0 && Value <= 0xffffff;
787  }
788  bool isImmThumbSR() const {
789    if (!isImm()) return false;
790    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
791    if (!CE) return false;
792    int64_t Value = CE->getValue();
793    return Value > 0 && Value < 33;
794  }
795  bool isPKHLSLImm() const {
796    if (!isImm()) return false;
797    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
798    if (!CE) return false;
799    int64_t Value = CE->getValue();
800    return Value >= 0 && Value < 32;
801  }
802  bool isPKHASRImm() const {
803    if (!isImm()) return false;
804    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
805    if (!CE) return false;
806    int64_t Value = CE->getValue();
807    return Value > 0 && Value <= 32;
808  }
809  bool isAdrLabel() const {
810    // If we have an immediate that's not a constant, treat it as a label
811    // reference needing a fixup. If it is a constant, but it can't fit
812    // into shift immediate encoding, we reject it.
813    if (isImm() && !isa<MCConstantExpr>(getImm())) return true;
814    else return (isARMSOImm() || isARMSOImmNeg());
815  }
816  bool isARMSOImm() const {
817    if (!isImm()) return false;
818    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
819    if (!CE) return false;
820    int64_t Value = CE->getValue();
821    return ARM_AM::getSOImmVal(Value) != -1;
822  }
823  bool isARMSOImmNot() const {
824    if (!isImm()) return false;
825    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
826    if (!CE) return false;
827    int64_t Value = CE->getValue();
828    return ARM_AM::getSOImmVal(~Value) != -1;
829  }
830  bool isARMSOImmNeg() const {
831    if (!isImm()) return false;
832    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
833    if (!CE) return false;
834    int64_t Value = CE->getValue();
835    // Only use this when not representable as a plain so_imm.
836    return ARM_AM::getSOImmVal(Value) == -1 &&
837      ARM_AM::getSOImmVal(-Value) != -1;
838  }
839  bool isT2SOImm() const {
840    if (!isImm()) return false;
841    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
842    if (!CE) return false;
843    int64_t Value = CE->getValue();
844    return ARM_AM::getT2SOImmVal(Value) != -1;
845  }
846  bool isT2SOImmNot() const {
847    if (!isImm()) return false;
848    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
849    if (!CE) return false;
850    int64_t Value = CE->getValue();
851    return ARM_AM::getT2SOImmVal(~Value) != -1;
852  }
853  bool isT2SOImmNeg() const {
854    if (!isImm()) return false;
855    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
856    if (!CE) return false;
857    int64_t Value = CE->getValue();
858    // Only use this when not representable as a plain so_imm.
859    return ARM_AM::getT2SOImmVal(Value) == -1 &&
860      ARM_AM::getT2SOImmVal(-Value) != -1;
861  }
862  bool isSetEndImm() const {
863    if (!isImm()) return false;
864    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
865    if (!CE) return false;
866    int64_t Value = CE->getValue();
867    return Value == 1 || Value == 0;
868  }
869  bool isReg() const { return Kind == k_Register; }
870  bool isRegList() const { return Kind == k_RegisterList; }
871  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
872  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
873  bool isToken() const { return Kind == k_Token; }
874  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
875  bool isMem() const { return Kind == k_Memory; }
876  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
877  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
878  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
879  bool isRotImm() const { return Kind == k_RotateImmediate; }
880  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
881  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
882  bool isPostIdxReg() const {
883    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
884  }
885  bool isMemNoOffset(bool alignOK = false) const {
886    if (!isMem())
887      return false;
888    // No offset of any kind.
889    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
890     (alignOK || Memory.Alignment == 0);
891  }
892  bool isMemPCRelImm12() const {
893    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
894      return false;
895    // Base register must be PC.
896    if (Memory.BaseRegNum != ARM::PC)
897      return false;
898    // Immediate offset in range [-4095, 4095].
899    if (!Memory.OffsetImm) return true;
900    int64_t Val = Memory.OffsetImm->getValue();
901    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
902  }
903  bool isAlignedMemory() const {
904    return isMemNoOffset(true);
905  }
906  bool isAddrMode2() const {
907    if (!isMem() || Memory.Alignment != 0) return false;
908    // Check for register offset.
909    if (Memory.OffsetRegNum) return true;
910    // Immediate offset in range [-4095, 4095].
911    if (!Memory.OffsetImm) return true;
912    int64_t Val = Memory.OffsetImm->getValue();
913    return Val > -4096 && Val < 4096;
914  }
915  bool isAM2OffsetImm() const {
916    if (!isImm()) return false;
917    // Immediate offset in range [-4095, 4095].
918    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
919    if (!CE) return false;
920    int64_t Val = CE->getValue();
921    return Val > -4096 && Val < 4096;
922  }
923  bool isAddrMode3() const {
924    // If we have an immediate that's not a constant, treat it as a label
925    // reference needing a fixup. If it is a constant, it's something else
926    // and we reject it.
927    if (isImm() && !isa<MCConstantExpr>(getImm()))
928      return true;
929    if (!isMem() || Memory.Alignment != 0) return false;
930    // No shifts are legal for AM3.
931    if (Memory.ShiftType != ARM_AM::no_shift) return false;
932    // Check for register offset.
933    if (Memory.OffsetRegNum) return true;
934    // Immediate offset in range [-255, 255].
935    if (!Memory.OffsetImm) return true;
936    int64_t Val = Memory.OffsetImm->getValue();
937    // The #-0 offset is encoded as INT32_MIN, and we have to check
938    // for this too.
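    // (INT32_MIN is the parser's sentinel for an explicit "#-0" offset, so
    // it can still be distinguished from a plain "#0" later on.)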
939    return (Val > -256 && Val < 256) || Val == INT32_MIN;
940  }
941  bool isAM3Offset() const {
942    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
943      return false;
944    if (Kind == k_PostIndexRegister)
945      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
946    // Immediate offset in range [-255, 255].
947    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
948    if (!CE) return false;
949    int64_t Val = CE->getValue();
950    // Special case, #-0 is INT32_MIN.
951    return (Val > -256 && Val < 256) || Val == INT32_MIN;
952  }
953  bool isAddrMode5() const {
954    // If we have an immediate that's not a constant, treat it as a label
955    // reference needing a fixup. If it is a constant, it's something else
956    // and we reject it.
957    if (isImm() && !isa<MCConstantExpr>(getImm()))
958      return true;
959    if (!isMem() || Memory.Alignment != 0) return false;
960    // Check for register offset.
961    if (Memory.OffsetRegNum) return false;
962    // Immediate offset in range [-1020, 1020] and a multiple of 4.
963    if (!Memory.OffsetImm) return true;
964    int64_t Val = Memory.OffsetImm->getValue();
965    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
966      Val == INT32_MIN;
967  }
968  bool isMemTBB() const {
969    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
970        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
971      return false;
972    return true;
973  }
974  bool isMemTBH() const {
975    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
976        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
977        Memory.Alignment != 0 )
978      return false;
979    return true;
980  }
981  bool isMemRegOffset() const {
982    if (!isMem() || !Memory.OffsetRegNum || Memory.Alignment != 0)
983      return false;
984    return true;
985  }
986  bool isT2MemRegOffset() const {
987    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
988        Memory.Alignment != 0)
989      return false;
990    // Only lsl #{0, 1, 2, 3} allowed.
991    if (Memory.ShiftType == ARM_AM::no_shift)
992      return true;
993    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
994      return false;
995    return true;
996  }
997  bool isMemThumbRR() const {
998    // Thumb reg+reg addressing is simple. Just two registers, a base and
999    // an offset. No shifts, negations or any other complicating factors.
1000    if (!isMem() || !Memory.OffsetRegNum || Memory.isNegative ||
1001        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
1002      return false;
1003    return isARMLowRegister(Memory.BaseRegNum) &&
1004      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
1005  }
1006  bool isMemThumbRIs4() const {
1007    if (!isMem() || Memory.OffsetRegNum != 0 ||
1008        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1009      return false;
1010    // Immediate offset, multiple of 4 in range [0, 124].
1011    if (!Memory.OffsetImm) return true;
1012    int64_t Val = Memory.OffsetImm->getValue();
1013    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
1014  }
1015  bool isMemThumbRIs2() const {
1016    if (!isMem() || Memory.OffsetRegNum != 0 ||
1017        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1018      return false;
1019    // Immediate offset, multiple of 2 in range [0, 62].
1020    if (!Memory.OffsetImm) return true;
1021    int64_t Val = Memory.OffsetImm->getValue();
1022    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
1023  }
1024  bool isMemThumbRIs1() const {
1025    if (!isMem() || Memory.OffsetRegNum != 0 ||
1026        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
1027      return false;
1028    // Immediate offset in range [0, 31].
1029    if (!Memory.OffsetImm) return true;
1030    int64_t Val = Memory.OffsetImm->getValue();
1031    return Val >= 0 && Val <= 31;
1032  }
1033  bool isMemThumbSPI() const {
1034    if (!isMem() || Memory.OffsetRegNum != 0 ||
1035        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1036      return false;
1037    // Immediate offset, multiple of 4 in range [0, 1020].
1038    if (!Memory.OffsetImm) return true;
1039    int64_t Val = Memory.OffsetImm->getValue();
1040    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1041  }
1042  bool isMemImm8s4Offset() const {
1043    // If we have an immediate that's not a constant, treat it as a label
1044    // reference needing a fixup. If it is a constant, it's something else
1045    // and we reject it.
1046    if (isImm() && !isa<MCConstantExpr>(getImm()))
1047      return true;
1048    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1049      return false;
1050    // Immediate offset a multiple of 4 in range [-1020, 1020].
1051    if (!Memory.OffsetImm) return true;
1052    int64_t Val = Memory.OffsetImm->getValue();
1053    // Special case, #-0 is INT32_MIN.
1054    return (Val >= -1020 && Val <= 1020 && (Val & 3) == 0) || Val == INT32_MIN;
1055  }
1056  bool isMemImm0_1020s4Offset() const {
1057    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1058      return false;
1059    // Immediate offset a multiple of 4 in range [0, 1020].
1060    if (!Memory.OffsetImm) return true;
1061    int64_t Val = Memory.OffsetImm->getValue();
1062    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1063  }
1064  bool isMemImm8Offset() const {
1065    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1066      return false;
1067    // Base reg of PC isn't allowed for these encodings.
1068    if (Memory.BaseRegNum == ARM::PC) return false;
1069    // Immediate offset in range [-255, 255].
1070    if (!Memory.OffsetImm) return true;
1071    int64_t Val = Memory.OffsetImm->getValue();
1072    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1073  }
1074  bool isMemPosImm8Offset() const {
1075    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1076      return false;
1077    // Immediate offset in range [0, 255].
1078    if (!Memory.OffsetImm) return true;
1079    int64_t Val = Memory.OffsetImm->getValue();
1080    return Val >= 0 && Val < 256;
1081  }
1082  bool isMemNegImm8Offset() const {
1083    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1084      return false;
1085    // Base reg of PC isn't allowed for these encodings.
1086    if (Memory.BaseRegNum == ARM::PC) return false;
1087    // Immediate offset in range [-255, -1].
1088    if (!Memory.OffsetImm) return false;
1089    int64_t Val = Memory.OffsetImm->getValue();
1090    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1091  }
1092  bool isMemUImm12Offset() const {
1093    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1094      return false;
1095    // Immediate offset in range [0, 4095].
1096    if (!Memory.OffsetImm) return true;
1097    int64_t Val = Memory.OffsetImm->getValue();
1098    return (Val >= 0 && Val < 4096);
1099  }
1100  bool isMemImm12Offset() const {
1101    // If we have an immediate that's not a constant, treat it as a label
1102    // reference needing a fixup. If it is a constant, it's something else
1103    // and we reject it.
1104    if (isImm() && !isa<MCConstantExpr>(getImm()))
1105      return true;
1106
1107    if (!isMem() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1108      return false;
1109    // Immediate offset in range [-4095, 4095].
1110    if (!Memory.OffsetImm) return true;
1111    int64_t Val = Memory.OffsetImm->getValue();
1112    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1113  }
1114  bool isPostIdxImm8() const {
1115    if (!isImm()) return false;
1116    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1117    if (!CE) return false;
1118    int64_t Val = CE->getValue();
1119    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1120  }
1121  bool isPostIdxImm8s4() const {
1122    if (!isImm()) return false;
1123    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1124    if (!CE) return false;
1125    int64_t Val = CE->getValue();
1126    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1127      (Val == INT32_MIN);
1128  }
1129
1130  bool isMSRMask() const { return Kind == k_MSRMask; }
1131  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1132
1133  // NEON operands.
1134  bool isSingleSpacedVectorList() const {
1135    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1136  }
1137  bool isDoubleSpacedVectorList() const {
1138    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1139  }
1140  bool isVecListOneD() const {
1141    if (!isSingleSpacedVectorList()) return false;
1142    return VectorList.Count == 1;
1143  }
1144
1145  bool isVecListDPair() const {
1146    if (!isSingleSpacedVectorList()) return false;
1147    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1148              .contains(VectorList.RegNum));
1149  }
1150
1151  bool isVecListThreeD() const {
1152    if (!isSingleSpacedVectorList()) return false;
1153    return VectorList.Count == 3;
1154  }
1155
1156  bool isVecListFourD() const {
1157    if (!isSingleSpacedVectorList()) return false;
1158    return VectorList.Count == 4;
1159  }
1160
1161  bool isVecListDPairSpaced() const {
1162    if (isSingleSpacedVectorList()) return false;
1163    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1164              .contains(VectorList.RegNum));
1165  }
1166
1167  bool isVecListThreeQ() const {
1168    if (!isDoubleSpacedVectorList()) return false;
1169    return VectorList.Count == 3;
1170  }
1171
1172  bool isVecListFourQ() const {
1173    if (!isDoubleSpacedVectorList()) return false;
1174    return VectorList.Count == 4;
1175  }
1176
1177  bool isSingleSpacedVectorAllLanes() const {
1178    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1179  }
1180  bool isDoubleSpacedVectorAllLanes() const {
1181    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1182  }
1183  bool isVecListOneDAllLanes() const {
1184    if (!isSingleSpacedVectorAllLanes()) return false;
1185    return VectorList.Count == 1;
1186  }
1187
1188  bool isVecListDPairAllLanes() const {
1189    if (!isSingleSpacedVectorAllLanes()) return false;
1190    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1191              .contains(VectorList.RegNum));
1192  }
1193
1194  bool isVecListDPairSpacedAllLanes() const {
1195    if (!isDoubleSpacedVectorAllLanes()) return false;
1196    return VectorList.Count == 2;
1197  }
1198
1199  bool isVecListThreeDAllLanes() const {
1200    if (!isSingleSpacedVectorAllLanes()) return false;
1201    return VectorList.Count == 3;
1202  }
1203
1204  bool isVecListThreeQAllLanes() const {
1205    if (!isDoubleSpacedVectorAllLanes()) return false;
1206    return VectorList.Count == 3;
1207  }
1208
1209  bool isVecListFourDAllLanes() const {
1210    if (!isSingleSpacedVectorAllLanes()) return false;
1211    return VectorList.Count == 4;
1212  }
1213
1214  bool isVecListFourQAllLanes() const {
1215    if (!isDoubleSpacedVectorAllLanes()) return false;
1216    return VectorList.Count == 4;
1217  }
1218
1219  bool isSingleSpacedVectorIndexed() const {
1220    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1221  }
1222  bool isDoubleSpacedVectorIndexed() const {
1223    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1224  }
1225  bool isVecListOneDByteIndexed() const {
1226    if (!isSingleSpacedVectorIndexed()) return false;
1227    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1228  }
1229
1230  bool isVecListOneDHWordIndexed() const {
1231    if (!isSingleSpacedVectorIndexed()) return false;
1232    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1233  }
1234
1235  bool isVecListOneDWordIndexed() const {
1236    if (!isSingleSpacedVectorIndexed()) return false;
1237    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1238  }
1239
1240  bool isVecListTwoDByteIndexed() const {
1241    if (!isSingleSpacedVectorIndexed()) return false;
1242    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1243  }
1244
1245  bool isVecListTwoDHWordIndexed() const {
1246    if (!isSingleSpacedVectorIndexed()) return false;
1247    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1248  }
1249
1250  bool isVecListTwoQWordIndexed() const {
1251    if (!isDoubleSpacedVectorIndexed()) return false;
1252    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1253  }
1254
1255  bool isVecListTwoQHWordIndexed() const {
1256    if (!isDoubleSpacedVectorIndexed()) return false;
1257    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1258  }
1259
1260  bool isVecListTwoDWordIndexed() const {
1261    if (!isSingleSpacedVectorIndexed()) return false;
1262    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1263  }
1264
1265  bool isVecListThreeDByteIndexed() const {
1266    if (!isSingleSpacedVectorIndexed()) return false;
1267    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1268  }
1269
1270  bool isVecListThreeDHWordIndexed() const {
1271    if (!isSingleSpacedVectorIndexed()) return false;
1272    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1273  }
1274
1275  bool isVecListThreeQWordIndexed() const {
1276    if (!isDoubleSpacedVectorIndexed()) return false;
1277    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1278  }
1279
1280  bool isVecListThreeQHWordIndexed() const {
1281    if (!isDoubleSpacedVectorIndexed()) return false;
1282    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1283  }
1284
1285  bool isVecListThreeDWordIndexed() const {
1286    if (!isSingleSpacedVectorIndexed()) return false;
1287    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1288  }
1289
1290  bool isVecListFourDByteIndexed() const {
1291    if (!isSingleSpacedVectorIndexed()) return false;
1292    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1293  }
1294
1295  bool isVecListFourDHWordIndexed() const {
1296    if (!isSingleSpacedVectorIndexed()) return false;
1297    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1298  }
1299
1300  bool isVecListFourQWordIndexed() const {
1301    if (!isDoubleSpacedVectorIndexed()) return false;
1302    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1303  }
1304
1305  bool isVecListFourQHWordIndexed() const {
1306    if (!isDoubleSpacedVectorIndexed()) return false;
1307    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1308  }
1309
1310  bool isVecListFourDWordIndexed() const {
1311    if (!isSingleSpacedVectorIndexed()) return false;
1312    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1313  }
1314
1315  bool isVectorIndex8() const {
1316    if (Kind != k_VectorIndex) return false;
1317    return VectorIndex.Val < 8;
1318  }
1319  bool isVectorIndex16() const {
1320    if (Kind != k_VectorIndex) return false;
1321    return VectorIndex.Val < 4;
1322  }
1323  bool isVectorIndex32() const {
1324    if (Kind != k_VectorIndex) return false;
1325    return VectorIndex.Val < 2;
1326  }
1327
1328  bool isNEONi8splat() const {
1329    if (!isImm()) return false;
1330    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1331    // Must be a constant.
1332    if (!CE) return false;
1333    int64_t Value = CE->getValue();
1334    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1335    // value.
1336    return Value >= 0 && Value < 256;
1337  }
1338
1339  bool isNEONi16splat() const {
1340    if (!isImm()) return false;
1341    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1342    // Must be a constant.
1343    if (!CE) return false;
1344    int64_t Value = CE->getValue();
1345    // i16 value in the range [0,255] or [0x0100, 0xff00]
1346    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1347  }
1348
1349  bool isNEONi32splat() const {
1350    if (!isImm()) return false;
1351    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1352    // Must be a constant.
1353    if (!CE) return false;
1354    int64_t Value = CE->getValue();
1355    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1356    return (Value >= 0 && Value < 256) ||
1357      (Value >= 0x0100 && Value <= 0xff00) ||
1358      (Value >= 0x010000 && Value <= 0xff0000) ||
1359      (Value >= 0x01000000 && Value <= 0xff000000);
1360  }
1361
1362  bool isNEONi32vmov() const {
1363    if (!isImm()) return false;
1364    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1365    // Must be a constant.
1366    if (!CE) return false;
1367    int64_t Value = CE->getValue();
1368    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1369    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1370    return (Value >= 0 && Value < 256) ||
1371      (Value >= 0x0100 && Value <= 0xff00) ||
1372      (Value >= 0x010000 && Value <= 0xff0000) ||
1373      (Value >= 0x01000000 && Value <= 0xff000000) ||
1374      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1375      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1376  }
1377  bool isNEONi32vmovNeg() const {
1378    if (!isImm()) return false;
1379    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1380    // Must be a constant.
1381    if (!CE) return false;
1382    int64_t Value = ~CE->getValue();
1383    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1384    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1385    return (Value >= 0 && Value < 256) ||
1386      (Value >= 0x0100 && Value <= 0xff00) ||
1387      (Value >= 0x010000 && Value <= 0xff0000) ||
1388      (Value >= 0x01000000 && Value <= 0xff000000) ||
1389      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1390      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1391  }
1392
1393  bool isNEONi64splat() const {
1394    if (!isImm()) return false;
1395    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1396    // Must be a constant.
1397    if (!CE) return false;
1398    uint64_t Value = CE->getValue();
1399    // i64 value with each byte being either 0 or 0xff.
1400    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1401      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1402    return true;
1403  }
1404
1405  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1406    // Add as immediates when possible.  Null MCExpr = 0.
1407    if (Expr == 0)
1408      Inst.addOperand(MCOperand::CreateImm(0));
1409    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1410      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1411    else
1412      Inst.addOperand(MCOperand::CreateExpr(Expr));
1413  }
1414
1415  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1416    assert(N == 2 && "Invalid number of operands!");
1417    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1418    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1419    Inst.addOperand(MCOperand::CreateReg(RegNum));
1420  }
1421
1422  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1423    assert(N == 1 && "Invalid number of operands!");
1424    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1425  }
1426
1427  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1428    assert(N == 1 && "Invalid number of operands!");
1429    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1430  }
1431
1432  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1433    assert(N == 1 && "Invalid number of operands!");
1434    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1435  }
1436
1437  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1438    assert(N == 1 && "Invalid number of operands!");
1439    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1440  }
1441
1442  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1443    assert(N == 1 && "Invalid number of operands!");
1444    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1445  }
1446
1447  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1448    assert(N == 1 && "Invalid number of operands!");
1449    Inst.addOperand(MCOperand::CreateReg(getReg()));
1450  }
1451
1452  void addRegOperands(MCInst &Inst, unsigned N) const {
1453    assert(N == 1 && "Invalid number of operands!");
1454    Inst.addOperand(MCOperand::CreateReg(getReg()));
1455  }
1456
1457  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1458    assert(N == 3 && "Invalid number of operands!");
1459    assert(isRegShiftedReg() &&
1460           "addRegShiftedRegOperands() on non RegShiftedReg!");
1461    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1462    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1463    Inst.addOperand(MCOperand::CreateImm(
1464      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1465  }
1466
1467  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1468    assert(N == 2 && "Invalid number of operands!");
1469    assert(isRegShiftedImm() &&
1470           "addRegShiftedImmOperands() on non RegShiftedImm!");
1471    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1472    // Shift of #32 is encoded as 0 where permitted
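    // (Illustrative: "asr #32" reaches this point with ShiftImm == 32 and is
    // emitted as 0.)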
1473    unsigned Imm = (RegShiftedImm.ShiftImm == 32 ? 0 : RegShiftedImm.ShiftImm);
1474    Inst.addOperand(MCOperand::CreateImm(
1475      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, Imm)));
1476  }
1477
1478  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1479    assert(N == 1 && "Invalid number of operands!");
1480    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1481                                         ShifterImm.Imm));
1482  }
1483
1484  void addRegListOperands(MCInst &Inst, unsigned N) const {
1485    assert(N == 1 && "Invalid number of operands!");
1486    const SmallVectorImpl<unsigned> &RegList = getRegList();
1487    for (SmallVectorImpl<unsigned>::const_iterator
1488           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1489      Inst.addOperand(MCOperand::CreateReg(*I));
1490  }
1491
1492  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1493    addRegListOperands(Inst, N);
1494  }
1495
1496  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1497    addRegListOperands(Inst, N);
1498  }
1499
1500  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1501    assert(N == 1 && "Invalid number of operands!");
1502    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1503    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1504  }
1505
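  // Worked example for the mask computation below: lsb=8, width=4 yields
  // 0xfffff0ff, i.e. a 32-bit mask with bits [11:8] clear and all others set.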
1506  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1507    assert(N == 1 && "Invalid number of operands!");
1508    // Munge the lsb/width into a bitfield mask.
1509    unsigned lsb = Bitfield.LSB;
1510    unsigned width = Bitfield.Width;
1511    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1512    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1513                      (32 - (lsb + width)));
1514    Inst.addOperand(MCOperand::CreateImm(Mask));
1515  }
1516
1517  void addImmOperands(MCInst &Inst, unsigned N) const {
1518    assert(N == 1 && "Invalid number of operands!");
1519    addExpr(Inst, getImm());
1520  }
1521
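  // Fixed-point fbits operands are stored in the MCInst as the complement of
  // the requested fraction bits (16 - fbits here, 32 - fbits below), which is
  // presumably the form the VCVT fixed-point encodings expect.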
1522  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1523    assert(N == 1 && "Invalid number of operands!");
1524    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1525    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1526  }
1527
1528  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1529    assert(N == 1 && "Invalid number of operands!");
1530    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1531    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1532  }
1533
1534  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1535    assert(N == 1 && "Invalid number of operands!");
1536    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1537    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1538    Inst.addOperand(MCOperand::CreateImm(Val));
1539  }
1540
1541  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1542    assert(N == 1 && "Invalid number of operands!");
1543    // FIXME: We really want to scale the value here, but the LDRD/STRD
1544    // instructions don't encode operands that way yet.
1545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1546    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1547  }
1548
1549  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1550    assert(N == 1 && "Invalid number of operands!");
1551    // The immediate is scaled by four in the encoding and is stored
1552    // in the MCInst as such. Lop off the low two bits here.
1553    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1554    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1555  }
1556
1557  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1558    assert(N == 1 && "Invalid number of operands!");
1559    // The immediate is scaled by four in the encoding and is stored
1560    // in the MCInst as such. Lop off the low two bits here.
1561    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1562    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1563  }
1564
1565  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1566    assert(N == 1 && "Invalid number of operands!");
1567    // The immediate is scaled by four in the encoding and is stored
1568    // in the MCInst as such. Lop off the low two bits here.
1569    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1570    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1571  }
1572
1573  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1574    assert(N == 1 && "Invalid number of operands!");
1575    // The constant encodes as the immediate-1, and we store in the instruction
1576    // the bits as encoded, so subtract off one here.
1577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1578    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1579  }
1580
1581  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1582    assert(N == 1 && "Invalid number of operands!");
1583    // The constant encodes as the immediate-1, and we store in the instruction
1584    // the bits as encoded, so subtract off one here.
1585    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1586    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1587  }
1588
1589  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1590    assert(N == 1 && "Invalid number of operands!");
1591    // The constant encodes as the immediate, except for 32, which encodes as
1592    // zero.
1593    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1594    unsigned Imm = CE->getValue();
1595    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1596  }
1597
1598  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1599    assert(N == 1 && "Invalid number of operands!");
1600    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1601    // the instruction as well.
1602    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1603    int Val = CE->getValue();
1604    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1605  }
1606
1607  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1608    assert(N == 1 && "Invalid number of operands!");
1609    // The operand is actually a t2_so_imm, but we have its bitwise
1610    // negation in the assembly source, so twiddle it here.
1611    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1612    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1613  }
1614
1615  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1616    assert(N == 1 && "Invalid number of operands!");
1617    // The operand is actually a t2_so_imm, but we have its
1618    // negation in the assembly source, so twiddle it here.
1619    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1620    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1621  }
1622
1623  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1624    assert(N == 1 && "Invalid number of operands!");
1625    // The operand is actually an imm0_4095, but we have its
1626    // negation in the assembly source, so twiddle it here.
1627    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1628    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1629  }
1630
1631  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1632    assert(N == 1 && "Invalid number of operands!");
1633    // The operand is actually a so_imm, but we have its bitwise
1634    // negation in the assembly source, so twiddle it here.
1635    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1636    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1637  }
1638
1639  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1640    assert(N == 1 && "Invalid number of operands!");
1641    // The operand is actually a so_imm, but we have its
1642    // negation in the assembly source, so twiddle it here.
1643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1644    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1645  }
1646
1647  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1648    assert(N == 1 && "Invalid number of operands!");
1649    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1650  }
1651
1652  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1653    assert(N == 1 && "Invalid number of operands!");
1654    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1655  }
1656
1657  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1658    assert(N == 1 && "Invalid number of operands!");
1659    int32_t Imm = Memory.OffsetImm->getValue();
1660    // FIXME: Handle #-0
1661    if (Imm == INT32_MIN) Imm = 0;
1662    Inst.addOperand(MCOperand::CreateImm(Imm));
1663  }
1664
1665  void addAdrLabelOperands(MCInst &Inst, unsigned N) const {
1666    assert(N == 1 && "Invalid number of operands!");
1667    assert(isImm() && "Not an immediate!");
1668
1669    // If we have an immediate that's not a constant, treat it as a label
1670    // reference needing a fixup.
1671    if (!isa<MCConstantExpr>(getImm())) {
1672      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1673      return;
1674    }
1675
1676    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1677    int Val = CE->getValue();
1678    Inst.addOperand(MCOperand::CreateImm(Val));
1679  }
1680
1681  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1682    assert(N == 2 && "Invalid number of operands!");
1683    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1684    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1685  }
1686
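  // Addressing mode 2 is emitted as three operands: the base register, the
  // offset register (0 when there is none), and a single immediate that packs
  // the add/sub flag, offset/shift amount, and shift kind via
  // ARM_AM::getAM2Opc.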
1687  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1688    assert(N == 3 && "Invalid number of operands!");
1689    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1690    if (!Memory.OffsetRegNum) {
1691      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1692      // Special case for #-0
1693      if (Val == INT32_MIN) Val = 0;
1694      if (Val < 0) Val = -Val;
1695      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1696    } else {
1697      // For register offset, we encode the shift type and negation flag
1698      // here.
1699      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1700                              Memory.ShiftImm, Memory.ShiftType);
1701    }
1702    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1703    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1704    Inst.addOperand(MCOperand::CreateImm(Val));
1705  }
1706
1707  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1708    assert(N == 2 && "Invalid number of operands!");
1709    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1710    assert(CE && "non-constant AM2OffsetImm operand!");
1711    int32_t Val = CE->getValue();
1712    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1713    // Special case for #-0
1714    if (Val == INT32_MIN) Val = 0;
1715    if (Val < 0) Val = -Val;
1716    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1717    Inst.addOperand(MCOperand::CreateReg(0));
1718    Inst.addOperand(MCOperand::CreateImm(Val));
1719  }
1720
1721  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1722    assert(N == 3 && "Invalid number of operands!");
1723    // If we have an immediate that's not a constant, treat it as a label
1724    // reference needing a fixup. If it is a constant, it's something else
1725    // and we reject it.
1726    if (isImm()) {
1727      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1728      Inst.addOperand(MCOperand::CreateReg(0));
1729      Inst.addOperand(MCOperand::CreateImm(0));
1730      return;
1731    }
1732
1733    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1734    if (!Memory.OffsetRegNum) {
1735      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1736      // Special case for #-0
1737      if (Val == INT32_MIN) Val = 0;
1738      if (Val < 0) Val = -Val;
1739      Val = ARM_AM::getAM3Opc(AddSub, Val);
1740    } else {
1741      // For register offset, we encode the shift type and negation flag
1742      // here.
1743      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1744    }
1745    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1746    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1747    Inst.addOperand(MCOperand::CreateImm(Val));
1748  }
1749
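  // An AM3 offset is either a post-indexed +/-Rm or a plain immediate. Both
  // forms are emitted as a register operand (0 for the immediate form)
  // followed by the packed ARM_AM::getAM3Opc value.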
1750  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1751    assert(N == 2 && "Invalid number of operands!");
1752    if (Kind == k_PostIndexRegister) {
1753      int32_t Val =
1754        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1755      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1756      Inst.addOperand(MCOperand::CreateImm(Val));
1757      return;
1758    }
1759
1760    // Constant offset.
1761    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1762    int32_t Val = CE->getValue();
1763    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1764    // Special case for #-0
1765    if (Val == INT32_MIN) Val = 0;
1766    if (Val < 0) Val = -Val;
1767    Val = ARM_AM::getAM3Opc(AddSub, Val);
1768    Inst.addOperand(MCOperand::CreateReg(0));
1769    Inst.addOperand(MCOperand::CreateImm(Val));
1770  }
1771
1772  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1773    assert(N == 2 && "Invalid number of operands!");
1774    // If we have an immediate that's not a constant, treat it as a label
1775    // reference needing a fixup. If it is a constant, it's something else
1776    // and we reject it.
1777    if (isImm()) {
1778      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1779      Inst.addOperand(MCOperand::CreateImm(0));
1780      return;
1781    }
1782
1783    // The lower two bits are always zero and as such are not encoded.
1784    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1785    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1786    // Special case for #-0
1787    if (Val == INT32_MIN) Val = 0;
1788    if (Val < 0) Val = -Val;
1789    Val = ARM_AM::getAM5Opc(AddSub, Val);
1790    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1791    Inst.addOperand(MCOperand::CreateImm(Val));
1792  }
1793
1794  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 2 && "Invalid number of operands!");
1796    // If we have an immediate that's not a constant, treat it as a label
1797    // reference needing a fixup. If it is a constant, it's something else
1798    // and we reject it.
1799    if (isImm()) {
1800      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1801      Inst.addOperand(MCOperand::CreateImm(0));
1802      return;
1803    }
1804
1805    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1806    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1807    Inst.addOperand(MCOperand::CreateImm(Val));
1808  }
1809
1810  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1811    assert(N == 2 && "Invalid number of operands!");
1812    // The lower two bits are always zero and as such are not encoded.
1813    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1814    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1815    Inst.addOperand(MCOperand::CreateImm(Val));
1816  }
1817
1818  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1819    assert(N == 2 && "Invalid number of operands!");
1820    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1821    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1822    Inst.addOperand(MCOperand::CreateImm(Val));
1823  }
1824
1825  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1826    addMemImm8OffsetOperands(Inst, N);
1827  }
1828
1829  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1830    addMemImm8OffsetOperands(Inst, N);
1831  }
1832
1833  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1834    assert(N == 2 && "Invalid number of operands!");
1835    // If this is an immediate, it's a label reference.
1836    if (isImm()) {
1837      addExpr(Inst, getImm());
1838      Inst.addOperand(MCOperand::CreateImm(0));
1839      return;
1840    }
1841
1842    // Otherwise, it's a normal memory reg+offset.
1843    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1844    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1845    Inst.addOperand(MCOperand::CreateImm(Val));
1846  }
1847
1848  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1849    assert(N == 2 && "Invalid number of operands!");
1850    // If this is an immediate, it's a label reference.
1851    if (isImm()) {
1852      addExpr(Inst, getImm());
1853      Inst.addOperand(MCOperand::CreateImm(0));
1854      return;
1855    }
1856
1857    // Otherwise, it's a normal memory reg+offset.
1858    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1859    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1860    Inst.addOperand(MCOperand::CreateImm(Val));
1861  }
1862
1863  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1864    assert(N == 2 && "Invalid number of operands!");
1865    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1866    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1867  }
1868
1869  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1870    assert(N == 2 && "Invalid number of operands!");
1871    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1872    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1873  }
1874
1875  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1876    assert(N == 3 && "Invalid number of operands!");
1877    unsigned Val =
1878      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1879                        Memory.ShiftImm, Memory.ShiftType);
1880    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1881    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1882    Inst.addOperand(MCOperand::CreateImm(Val));
1883  }
1884
1885  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1886    assert(N == 3 && "Invalid number of operands!");
1887    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1888    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1889    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1890  }
1891
1892  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1893    assert(N == 2 && "Invalid number of operands!");
1894    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1895    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1896  }
1897
1898  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1899    assert(N == 2 && "Invalid number of operands!");
1900    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1901    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1902    Inst.addOperand(MCOperand::CreateImm(Val));
1903  }
1904
1905  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1906    assert(N == 2 && "Invalid number of operands!");
1907    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1908    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1909    Inst.addOperand(MCOperand::CreateImm(Val));
1910  }
1911
1912  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1913    assert(N == 2 && "Invalid number of operands!");
1914    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1915    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1916    Inst.addOperand(MCOperand::CreateImm(Val));
1917  }
1918
1919  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1920    assert(N == 2 && "Invalid number of operands!");
1921    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1922    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1923    Inst.addOperand(MCOperand::CreateImm(Val));
1924  }
1925
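  // The post-indexed imm8 packs the magnitude in the low 8 bits and the
  // add/sub flag in bit 8; e.g., "#4" becomes 0x104 and "#-4" becomes 0x4.
  // INT32_MIN (used elsewhere as the "#-0" sentinel) collapses to zero.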
1926  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1927    assert(N == 1 && "Invalid number of operands!");
1928    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1929    assert(CE && "non-constant post-idx-imm8 operand!");
1930    int Imm = CE->getValue();
1931    bool isAdd = Imm >= 0;
1932    if (Imm == INT32_MIN) Imm = 0;
1933    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1934    Inst.addOperand(MCOperand::CreateImm(Imm));
1935  }
1936
1937  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1938    assert(N == 1 && "Invalid number of operands!");
1939    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1940    assert(CE && "non-constant post-idx-imm8s4 operand!");
1941    int Imm = CE->getValue();
1942    bool isAdd = Imm >= 0;
1943    if (Imm == INT32_MIN) Imm = 0;
1944    // Immediate is scaled by 4.
1945    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1946    Inst.addOperand(MCOperand::CreateImm(Imm));
1947  }
1948
1949  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1950    assert(N == 2 && "Invalid number of operands!");
1951    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1952    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1953  }
1954
1955  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1956    assert(N == 2 && "Invalid number of operands!");
1957    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1958    // The sign, shift type, and shift amount are encoded in a single operand
1959    // using the AM2 encoding helpers.
1960    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1961    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1962                                     PostIdxReg.ShiftTy);
1963    Inst.addOperand(MCOperand::CreateImm(Imm));
1964  }
1965
1966  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1967    assert(N == 1 && "Invalid number of operands!");
1968    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1969  }
1970
1971  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1972    assert(N == 1 && "Invalid number of operands!");
1973    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1974  }
1975
1976  void addVecListOperands(MCInst &Inst, unsigned N) const {
1977    assert(N == 1 && "Invalid number of operands!");
1978    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1979  }
1980
1981  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1982    assert(N == 2 && "Invalid number of operands!");
1983    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1984    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1985  }
1986
1987  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1988    assert(N == 1 && "Invalid number of operands!");
1989    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1990  }
1991
1992  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1993    assert(N == 1 && "Invalid number of operands!");
1994    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1995  }
1996
1997  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1998    assert(N == 1 && "Invalid number of operands!");
1999    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
2000  }
2001
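  // For the NEON "modified immediate" operands below, the value added to the
  // MCInst carries extra high bits alongside the 8-bit payload; these appear
  // to select the cmode/op fields of the encoding. For an i8 splat the value
  // is simply OR'd with 0xe00, so 0x42 becomes 0xe42.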
2002  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
2003    assert(N == 1 && "Invalid number of operands!");
2004    // The immediate encodes the type of constant as well as the value.
2005    // Mask in that this is an i8 splat.
2006    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2007    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
2008  }
2009
2010  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
2011    assert(N == 1 && "Invalid number of operands!");
2012    // The immediate encodes the type of constant as well as the value.
2013    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2014    unsigned Value = CE->getValue();
2015    if (Value >= 256)
2016      Value = (Value >> 8) | 0xa00;
2017    else
2018      Value |= 0x800;
2019    Inst.addOperand(MCOperand::CreateImm(Value));
2020  }
2021
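  // Example of the i32 splat rewrite below: 0x00ab0000 is a single byte
  // shifted into the third position, so it becomes (0xab | 0x400) = 0x4ab.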
2022  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
2023    assert(N == 1 && "Invalid number of operands!");
2024    // The immediate encodes the type of constant as well as the value.
2025    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2026    unsigned Value = CE->getValue();
2027    if (Value >= 256 && Value <= 0xff00)
2028      Value = (Value >> 8) | 0x200;
2029    else if (Value > 0xffff && Value <= 0xff0000)
2030      Value = (Value >> 16) | 0x400;
2031    else if (Value > 0xffffff)
2032      Value = (Value >> 24) | 0x600;
2033    Inst.addOperand(MCOperand::CreateImm(Value));
2034  }
2035
2036  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
2037    assert(N == 1 && "Invalid number of operands!");
2038    // The immediate encodes the type of constant as well as the value.
2039    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2040    unsigned Value = CE->getValue();
2041    if (Value >= 256 && Value <= 0xffff)
2042      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2043    else if (Value > 0xffff && Value <= 0xffffff)
2044      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2045    else if (Value > 0xffffff)
2046      Value = (Value >> 24) | 0x600;
2047    Inst.addOperand(MCOperand::CreateImm(Value));
2048  }
2049
2050  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2051    assert(N == 1 && "Invalid number of operands!");
2052    // The immediate encodes the type of constant as well as the value.
2053    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2054    unsigned Value = ~CE->getValue();
2055    if (Value >= 256 && Value <= 0xffff)
2056      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2057    else if (Value > 0xffff && Value <= 0xffffff)
2058      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2059    else if (Value > 0xffffff)
2060      Value = (Value >> 24) | 0x600;
2061    Inst.addOperand(MCOperand::CreateImm(Value));
2062  }
2063
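  // Each byte of the i64 value is either 0x00 or 0xff (checked by the
  // predicate above), so only one bit per byte is kept; e.g.,
  // 0x00ff00ff00ff00ff becomes 0x55, emitted as (0x55 | 0x1e00) = 0x1e55.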
2064  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2065    assert(N == 1 && "Invalid number of operands!");
2066    // The immediate encodes the type of constant as well as the value.
2067    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2068    uint64_t Value = CE->getValue();
2069    unsigned Imm = 0;
2070    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2071      Imm |= (Value & 1) << i;
2072    }
2073    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2074  }
2075
2076  virtual void print(raw_ostream &OS) const;
2077
2078  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2079    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2080    Op->ITMask.Mask = Mask;
2081    Op->StartLoc = S;
2082    Op->EndLoc = S;
2083    return Op;
2084  }
2085
2086  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2087    ARMOperand *Op = new ARMOperand(k_CondCode);
2088    Op->CC.Val = CC;
2089    Op->StartLoc = S;
2090    Op->EndLoc = S;
2091    return Op;
2092  }
2093
2094  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2095    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2096    Op->Cop.Val = CopVal;
2097    Op->StartLoc = S;
2098    Op->EndLoc = S;
2099    return Op;
2100  }
2101
2102  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2103    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2104    Op->Cop.Val = CopVal;
2105    Op->StartLoc = S;
2106    Op->EndLoc = S;
2107    return Op;
2108  }
2109
2110  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2111    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2112    Op->Cop.Val = Val;
2113    Op->StartLoc = S;
2114    Op->EndLoc = E;
2115    return Op;
2116  }
2117
2118  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2119    ARMOperand *Op = new ARMOperand(k_CCOut);
2120    Op->Reg.RegNum = RegNum;
2121    Op->StartLoc = S;
2122    Op->EndLoc = S;
2123    return Op;
2124  }
2125
2126  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2127    ARMOperand *Op = new ARMOperand(k_Token);
2128    Op->Tok.Data = Str.data();
2129    Op->Tok.Length = Str.size();
2130    Op->StartLoc = S;
2131    Op->EndLoc = S;
2132    return Op;
2133  }
2134
2135  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2136    ARMOperand *Op = new ARMOperand(k_Register);
2137    Op->Reg.RegNum = RegNum;
2138    Op->StartLoc = S;
2139    Op->EndLoc = E;
2140    return Op;
2141  }
2142
2143  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2144                                           unsigned SrcReg,
2145                                           unsigned ShiftReg,
2146                                           unsigned ShiftImm,
2147                                           SMLoc S, SMLoc E) {
2148    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2149    Op->RegShiftedReg.ShiftTy = ShTy;
2150    Op->RegShiftedReg.SrcReg = SrcReg;
2151    Op->RegShiftedReg.ShiftReg = ShiftReg;
2152    Op->RegShiftedReg.ShiftImm = ShiftImm;
2153    Op->StartLoc = S;
2154    Op->EndLoc = E;
2155    return Op;
2156  }
2157
2158  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2159                                            unsigned SrcReg,
2160                                            unsigned ShiftImm,
2161                                            SMLoc S, SMLoc E) {
2162    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2163    Op->RegShiftedImm.ShiftTy = ShTy;
2164    Op->RegShiftedImm.SrcReg = SrcReg;
2165    Op->RegShiftedImm.ShiftImm = ShiftImm;
2166    Op->StartLoc = S;
2167    Op->EndLoc = E;
2168    return Op;
2169  }
2170
2171  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2172                                   SMLoc S, SMLoc E) {
2173    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2174    Op->ShifterImm.isASR = isASR;
2175    Op->ShifterImm.Imm = Imm;
2176    Op->StartLoc = S;
2177    Op->EndLoc = E;
2178    return Op;
2179  }
2180
2181  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2182    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2183    Op->RotImm.Imm = Imm;
2184    Op->StartLoc = S;
2185    Op->EndLoc = E;
2186    return Op;
2187  }
2188
2189  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2190                                    SMLoc S, SMLoc E) {
2191    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2192    Op->Bitfield.LSB = LSB;
2193    Op->Bitfield.Width = Width;
2194    Op->StartLoc = S;
2195    Op->EndLoc = E;
2196    return Op;
2197  }
2198
2199  static ARMOperand *
2200  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2201                SMLoc StartLoc, SMLoc EndLoc) {
2202    KindTy Kind = k_RegisterList;
2203
2204    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2205      Kind = k_DPRRegisterList;
2206    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2207             contains(Regs.front().first))
2208      Kind = k_SPRRegisterList;
2209
2210    ARMOperand *Op = new ARMOperand(Kind);
2211    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2212           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2213      Op->Registers.push_back(I->first);
2214    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2215    Op->StartLoc = StartLoc;
2216    Op->EndLoc = EndLoc;
2217    return Op;
2218  }
2219
2220  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2221                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2222    ARMOperand *Op = new ARMOperand(k_VectorList);
2223    Op->VectorList.RegNum = RegNum;
2224    Op->VectorList.Count = Count;
2225    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2226    Op->StartLoc = S;
2227    Op->EndLoc = E;
2228    return Op;
2229  }
2230
2231  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2232                                              bool isDoubleSpaced,
2233                                              SMLoc S, SMLoc E) {
2234    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2235    Op->VectorList.RegNum = RegNum;
2236    Op->VectorList.Count = Count;
2237    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2238    Op->StartLoc = S;
2239    Op->EndLoc = E;
2240    return Op;
2241  }
2242
2243  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2244                                             unsigned Index,
2245                                             bool isDoubleSpaced,
2246                                             SMLoc S, SMLoc E) {
2247    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2248    Op->VectorList.RegNum = RegNum;
2249    Op->VectorList.Count = Count;
2250    Op->VectorList.LaneIndex = Index;
2251    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2252    Op->StartLoc = S;
2253    Op->EndLoc = E;
2254    return Op;
2255  }
2256
2257  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2258                                       MCContext &Ctx) {
2259    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2260    Op->VectorIndex.Val = Idx;
2261    Op->StartLoc = S;
2262    Op->EndLoc = E;
2263    return Op;
2264  }
2265
2266  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2267    ARMOperand *Op = new ARMOperand(k_Immediate);
2268    Op->Imm.Val = Val;
2269    Op->StartLoc = S;
2270    Op->EndLoc = E;
2271    return Op;
2272  }
2273
2274  static ARMOperand *CreateMem(unsigned BaseRegNum,
2275                               const MCConstantExpr *OffsetImm,
2276                               unsigned OffsetRegNum,
2277                               ARM_AM::ShiftOpc ShiftType,
2278                               unsigned ShiftImm,
2279                               unsigned Alignment,
2280                               bool isNegative,
2281                               SMLoc S, SMLoc E) {
2282    ARMOperand *Op = new ARMOperand(k_Memory);
2283    Op->Memory.BaseRegNum = BaseRegNum;
2284    Op->Memory.OffsetImm = OffsetImm;
2285    Op->Memory.OffsetRegNum = OffsetRegNum;
2286    Op->Memory.ShiftType = ShiftType;
2287    Op->Memory.ShiftImm = ShiftImm;
2288    Op->Memory.Alignment = Alignment;
2289    Op->Memory.isNegative = isNegative;
2290    Op->StartLoc = S;
2291    Op->EndLoc = E;
2292    return Op;
2293  }
2294
2295  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2296                                      ARM_AM::ShiftOpc ShiftTy,
2297                                      unsigned ShiftImm,
2298                                      SMLoc S, SMLoc E) {
2299    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2300    Op->PostIdxReg.RegNum = RegNum;
2301    Op->PostIdxReg.isAdd = isAdd;
2302    Op->PostIdxReg.ShiftTy = ShiftTy;
2303    Op->PostIdxReg.ShiftImm = ShiftImm;
2304    Op->StartLoc = S;
2305    Op->EndLoc = E;
2306    return Op;
2307  }
2308
2309  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2310    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2311    Op->MBOpt.Val = Opt;
2312    Op->StartLoc = S;
2313    Op->EndLoc = S;
2314    return Op;
2315  }
2316
2317  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2318    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2319    Op->IFlags.Val = IFlags;
2320    Op->StartLoc = S;
2321    Op->EndLoc = S;
2322    return Op;
2323  }
2324
2325  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2326    ARMOperand *Op = new ARMOperand(k_MSRMask);
2327    Op->MMask.Val = MMask;
2328    Op->StartLoc = S;
2329    Op->EndLoc = S;
2330    return Op;
2331  }
2332};
2333
2334} // end anonymous namespace.
2335
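// Debug dump of a parsed operand. The exact format varies by operand kind and
// is only intended for diagnostics.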
2336void ARMOperand::print(raw_ostream &OS) const {
2337  switch (Kind) {
2338  case k_CondCode:
2339    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2340    break;
2341  case k_CCOut:
2342    OS << "<ccout " << getReg() << ">";
2343    break;
2344  case k_ITCondMask: {
2345    static const char *const MaskStr[] = {
2346      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2347      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2348    };
2349    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2350    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2351    break;
2352  }
2353  case k_CoprocNum:
2354    OS << "<coprocessor number: " << getCoproc() << ">";
2355    break;
2356  case k_CoprocReg:
2357    OS << "<coprocessor register: " << getCoproc() << ">";
2358    break;
2359  case k_CoprocOption:
2360    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2361    break;
2362  case k_MSRMask:
2363    OS << "<mask: " << getMSRMask() << ">";
2364    break;
2365  case k_Immediate:
2366    getImm()->print(OS);
2367    break;
2368  case k_MemBarrierOpt:
2369    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2370    break;
2371  case k_Memory:
2372    OS << "<memory "
2373       << " base:" << Memory.BaseRegNum;
2374    OS << ">";
2375    break;
2376  case k_PostIndexRegister:
2377    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2378       << PostIdxReg.RegNum;
2379    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2380      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2381         << PostIdxReg.ShiftImm;
2382    OS << ">";
2383    break;
2384  case k_ProcIFlags: {
2385    OS << "<ARM_PROC::";
2386    unsigned IFlags = getProcIFlags();
2387    for (int i=2; i >= 0; --i)
2388      if (IFlags & (1 << i))
2389        OS << ARM_PROC::IFlagsToString(1 << i);
2390    OS << ">";
2391    break;
2392  }
2393  case k_Register:
2394    OS << "<register " << getReg() << ">";
2395    break;
2396  case k_ShifterImmediate:
2397    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2398       << " #" << ShifterImm.Imm << ">";
2399    break;
2400  case k_ShiftedRegister:
2401    OS << "<so_reg_reg "
2402       << RegShiftedReg.SrcReg << " "
2403       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2404       << " " << RegShiftedReg.ShiftReg << ">";
2405    break;
2406  case k_ShiftedImmediate:
2407    OS << "<so_reg_imm "
2408       << RegShiftedImm.SrcReg << " "
2409       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2410       << " #" << RegShiftedImm.ShiftImm << ">";
2411    break;
2412  case k_RotateImmediate:
2413    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2414    break;
2415  case k_BitfieldDescriptor:
2416    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2417       << ", width: " << Bitfield.Width << ">";
2418    break;
2419  case k_RegisterList:
2420  case k_DPRRegisterList:
2421  case k_SPRRegisterList: {
2422    OS << "<register_list ";
2423
2424    const SmallVectorImpl<unsigned> &RegList = getRegList();
2425    for (SmallVectorImpl<unsigned>::const_iterator
2426           I = RegList.begin(), E = RegList.end(); I != E; ) {
2427      OS << *I;
2428      if (++I < E) OS << ", ";
2429    }
2430
2431    OS << ">";
2432    break;
2433  }
2434  case k_VectorList:
2435    OS << "<vector_list " << VectorList.Count << " * "
2436       << VectorList.RegNum << ">";
2437    break;
2438  case k_VectorListAllLanes:
2439    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2440       << VectorList.RegNum << ">";
2441    break;
2442  case k_VectorListIndexed:
2443    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2444       << VectorList.Count << " * " << VectorList.RegNum << ">";
2445    break;
2446  case k_Token:
2447    OS << "'" << getToken() << "'";
2448    break;
2449  case k_VectorIndex:
2450    OS << "<vectorindex " << getVectorIndex() << ">";
2451    break;
2452  }
2453}
2454
2455/// @name Auto-generated Match Functions
2456/// {
2457
2458static unsigned MatchRegisterName(StringRef Name);
2459
2460/// }
2461
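// Entry point used by the generic assembly parser: fills in RegNo and the
// source range, returning true when the current token is not a register.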
2462bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2463                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2464  StartLoc = Parser.getTok().getLoc();
2465  EndLoc = Parser.getTok().getEndLoc();
2466  RegNo = tryParseRegister();
2467
2468  return (RegNo == (unsigned)-1);
2469}
2470
2471/// Try to parse a register name.  The token must be an Identifier when called,
2472/// and if it is a register name the token is eaten and the register number is
2473/// returned.  Otherwise return -1.
2474///
2475int ARMAsmParser::tryParseRegister() {
2476  const AsmToken &Tok = Parser.getTok();
2477  if (Tok.isNot(AsmToken::Identifier)) return -1;
2478
2479  std::string lowerCase = Tok.getString().lower();
2480  unsigned RegNum = MatchRegisterName(lowerCase);
2481  if (!RegNum) {
2482    RegNum = StringSwitch<unsigned>(lowerCase)
2483      .Case("r13", ARM::SP)
2484      .Case("r14", ARM::LR)
2485      .Case("r15", ARM::PC)
2486      .Case("ip", ARM::R12)
2487      // Additional register name aliases for 'gas' compatibility.
2488      .Case("a1", ARM::R0)
2489      .Case("a2", ARM::R1)
2490      .Case("a3", ARM::R2)
2491      .Case("a4", ARM::R3)
2492      .Case("v1", ARM::R4)
2493      .Case("v2", ARM::R5)
2494      .Case("v3", ARM::R6)
2495      .Case("v4", ARM::R7)
2496      .Case("v5", ARM::R8)
2497      .Case("v6", ARM::R9)
2498      .Case("v7", ARM::R10)
2499      .Case("v8", ARM::R11)
2500      .Case("sb", ARM::R9)
2501      .Case("sl", ARM::R10)
2502      .Case("fp", ARM::R11)
2503      .Default(0);
2504  }
2505  if (!RegNum) {
2506    // Check for aliases registered via .req. Canonicalize to lower case.
2507    // That's more consistent since register names are case insensitive, and
2508    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2509    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2510    // If no match, return failure.
2511    if (Entry == RegisterReqs.end())
2512      return -1;
2513    Parser.Lex(); // Eat identifier token.
2514    return Entry->getValue();
2515  }
2516
2517  Parser.Lex(); // Eat identifier token.
2518
2519  return RegNum;
2520}
2521
2522// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2523// If a recoverable error occurs, return 1. If an irrecoverable error
2524// occurs, return -1. An irrecoverable error is one where tokens have been
2525// consumed in the process of trying to parse the shifter (i.e., when it is
2526// indeed a shifter operand, but malformed).
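// For example, in "mov r0, r1, lsl #2" this is entered once "r1" has already
// been parsed; that register operand is popped off and folded into a single
// shifted-register (or shifted-immediate) operand.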
2527int ARMAsmParser::tryParseShiftRegister(
2528                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2529  SMLoc S = Parser.getTok().getLoc();
2530  const AsmToken &Tok = Parser.getTok();
2531  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2532
2533  std::string lowerCase = Tok.getString().lower();
2534  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2535      .Case("asl", ARM_AM::lsl)
2536      .Case("lsl", ARM_AM::lsl)
2537      .Case("lsr", ARM_AM::lsr)
2538      .Case("asr", ARM_AM::asr)
2539      .Case("ror", ARM_AM::ror)
2540      .Case("rrx", ARM_AM::rrx)
2541      .Default(ARM_AM::no_shift);
2542
2543  if (ShiftTy == ARM_AM::no_shift)
2544    return 1;
2545
2546  Parser.Lex(); // Eat the operator.
2547
2548  // The source register for the shift has already been added to the
2549  // operand list, so we need to pop it off and combine it into the shifted
2550  // register operand instead.
2551  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2552  if (!PrevOp->isReg())
2553    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2554  int SrcReg = PrevOp->getReg();
2555
2556  SMLoc EndLoc;
2557  int64_t Imm = 0;
2558  int ShiftReg = 0;
2559  if (ShiftTy == ARM_AM::rrx) {
2560    // RRX doesn't have an explicit shift amount. The encoder expects
2561    // the shift register to be the same as the source register. Seems odd,
2562    // but OK.
2563    ShiftReg = SrcReg;
2564  } else {
2565    // Figure out if this is shifted by a constant or a register (for non-RRX).
2566    if (Parser.getTok().is(AsmToken::Hash) ||
2567        Parser.getTok().is(AsmToken::Dollar)) {
2568      Parser.Lex(); // Eat hash.
2569      SMLoc ImmLoc = Parser.getTok().getLoc();
2570      const MCExpr *ShiftExpr = 0;
2571      if (getParser().ParseExpression(ShiftExpr, EndLoc)) {
2572        Error(ImmLoc, "invalid immediate shift value");
2573        return -1;
2574      }
2575      // The expression must be evaluatable as an immediate.
2576      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2577      if (!CE) {
2578        Error(ImmLoc, "invalid immediate shift value");
2579        return -1;
2580      }
2581      // Range check the immediate.
2582      // lsl, ror: 0 <= imm <= 31
2583      // lsr, asr: 0 <= imm <= 32
2584      Imm = CE->getValue();
2585      if (Imm < 0 ||
2586          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2587          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2588        Error(ImmLoc, "immediate shift value out of range");
2589        return -1;
2590      }
2591      // A shift by zero is a nop. Always send it through as lsl.
2592      // ('as' compatibility)
2593      if (Imm == 0)
2594        ShiftTy = ARM_AM::lsl;
2595    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2596      SMLoc L = Parser.getTok().getLoc();
2597      EndLoc = Parser.getTok().getEndLoc();
2598      ShiftReg = tryParseRegister();
2599      if (ShiftReg == -1) {
2600        Error (L, "expected immediate or register in shift operand");
2601        return -1;
2602      }
2603    } else {
2604      Error (Parser.getTok().getLoc(),
2605                    "expected immediate or register in shift operand");
2606      return -1;
2607    }
2608  }
2609
2610  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2611    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2612                                                         ShiftReg, Imm,
2613                                                         S, EndLoc));
2614  else
2615    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2616                                                          S, EndLoc));
2617
2618  return 0;
2619}
2620
2621
2622/// Try to parse a register name.  The token must be an Identifier when called.
2623/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2624/// if there is a "writeback". Returns 'true' if it's not a register.
2625///
2626/// TODO this is likely to change to allow different register types and or to
2627/// parse for a specific register type.
2628bool ARMAsmParser::
2629tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2630  const AsmToken &RegTok = Parser.getTok();
2631  int RegNo = tryParseRegister();
2632  if (RegNo == -1)
2633    return true;
2634
2635  Operands.push_back(ARMOperand::CreateReg(RegNo, RegTok.getLoc(),
2636                                           RegTok.getEndLoc()));
2637
2638  const AsmToken &ExclaimTok = Parser.getTok();
2639  if (ExclaimTok.is(AsmToken::Exclaim)) {
2640    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2641                                               ExclaimTok.getLoc()));
2642    Parser.Lex(); // Eat exclaim token
2643    return false;
2644  }
2645
2646  // Also check for an index operand. This is only legal for vector registers,
2647  // but that'll get caught OK in operand matching, so we don't need to
2648  // explicitly filter everything else out here.
2649  if (Parser.getTok().is(AsmToken::LBrac)) {
2650    SMLoc SIdx = Parser.getTok().getLoc();
2651    Parser.Lex(); // Eat left bracket token.
2652
2653    const MCExpr *ImmVal;
2654    if (getParser().ParseExpression(ImmVal))
2655      return true;
2656    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2657    if (!MCE)
2658      return TokError("immediate value expected for vector index");
2659
2660    if (Parser.getTok().isNot(AsmToken::RBrac))
2661      return Error(Parser.getTok().getLoc(), "']' expected");
2662
2663    SMLoc E = Parser.getTok().getEndLoc();
2664    Parser.Lex(); // Eat right bracket token.
2665
2666    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2667                                                     SIdx, E,
2668                                                     getContext()));
2669  }
2670
2671  return false;
2672}
2673
2674/// MatchCoprocessorOperandName - Try to match a coprocessor-related
2675/// symbolic operand name and return its number, or -1 if there is no
2676/// match. Examples: "p1", "p7", "c3", "c5", ...
2677static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2678  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2679  // but efficient.
2680  switch (Name.size()) {
2681  default: return -1;
2682  case 2:
2683    if (Name[0] != CoprocOp)
2684      return -1;
2685    switch (Name[1]) {
2686    default:  return -1;
2687    case '0': return 0;
2688    case '1': return 1;
2689    case '2': return 2;
2690    case '3': return 3;
2691    case '4': return 4;
2692    case '5': return 5;
2693    case '6': return 6;
2694    case '7': return 7;
2695    case '8': return 8;
2696    case '9': return 9;
2697    }
2698  case 3:
2699    if (Name[0] != CoprocOp || Name[1] != '1')
2700      return -1;
2701    switch (Name[2]) {
2702    default:  return -1;
2703    case '0': return 10;
2704    case '1': return 11;
2705    case '2': return 12;
2706    case '3': return 13;
2707    case '4': return 14;
2708    case '5': return 15;
2709    }
2710  }
2711}
2712
2713/// parseITCondCode - Try to parse a condition code for an IT instruction.
2714ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2715parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2716  SMLoc S = Parser.getTok().getLoc();
2717  const AsmToken &Tok = Parser.getTok();
2718  if (!Tok.is(AsmToken::Identifier))
2719    return MatchOperand_NoMatch;
2720  unsigned CC = StringSwitch<unsigned>(Tok.getString().lower())
2721    .Case("eq", ARMCC::EQ)
2722    .Case("ne", ARMCC::NE)
2723    .Case("hs", ARMCC::HS)
2724    .Case("cs", ARMCC::HS)
2725    .Case("lo", ARMCC::LO)
2726    .Case("cc", ARMCC::LO)
2727    .Case("mi", ARMCC::MI)
2728    .Case("pl", ARMCC::PL)
2729    .Case("vs", ARMCC::VS)
2730    .Case("vc", ARMCC::VC)
2731    .Case("hi", ARMCC::HI)
2732    .Case("ls", ARMCC::LS)
2733    .Case("ge", ARMCC::GE)
2734    .Case("lt", ARMCC::LT)
2735    .Case("gt", ARMCC::GT)
2736    .Case("le", ARMCC::LE)
2737    .Case("al", ARMCC::AL)
2738    .Default(~0U);
2739  if (CC == ~0U)
2740    return MatchOperand_NoMatch;
2741  Parser.Lex(); // Eat the token.
2742
2743  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2744
2745  return MatchOperand_Success;
2746}
2747
2748/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2749/// token must be an Identifier when called, and if it is a coprocessor
2750/// number, the token is eaten and the operand is added to the operand list.
2751ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2752parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2753  SMLoc S = Parser.getTok().getLoc();
2754  const AsmToken &Tok = Parser.getTok();
2755  if (Tok.isNot(AsmToken::Identifier))
2756    return MatchOperand_NoMatch;
2757
2758  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2759  if (Num == -1)
2760    return MatchOperand_NoMatch;
2761
2762  Parser.Lex(); // Eat identifier token.
2763  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2764  return MatchOperand_Success;
2765}
2766
2767/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2768/// token must be an Identifier when called, and if it is a coprocessor
2769/// register, the token is eaten and the operand is added to the operand list.
2770ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2771parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2772  SMLoc S = Parser.getTok().getLoc();
2773  const AsmToken &Tok = Parser.getTok();
2774  if (Tok.isNot(AsmToken::Identifier))
2775    return MatchOperand_NoMatch;
2776
2777  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2778  if (Reg == -1)
2779    return MatchOperand_NoMatch;
2780
2781  Parser.Lex(); // Eat identifier token.
2782  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2783  return MatchOperand_Success;
2784}
2785
2786/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2787/// coproc_option : '{' imm0_255 '}'
2788ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2789parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2790  SMLoc S = Parser.getTok().getLoc();
2791
2792  // If this isn't a '{', this isn't a coprocessor immediate operand.
2793  if (Parser.getTok().isNot(AsmToken::LCurly))
2794    return MatchOperand_NoMatch;
2795  Parser.Lex(); // Eat the '{'
2796
2797  const MCExpr *Expr;
2798  SMLoc Loc = Parser.getTok().getLoc();
2799  if (getParser().ParseExpression(Expr)) {
2800    Error(Loc, "illegal expression");
2801    return MatchOperand_ParseFail;
2802  }
2803  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2804  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2805    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2806    return MatchOperand_ParseFail;
2807  }
2808  int Val = CE->getValue();
2809
2810  // Check for and consume the closing '}'
2811  if (Parser.getTok().isNot(AsmToken::RCurly))
2812    return MatchOperand_ParseFail;
2813  SMLoc E = Parser.getTok().getEndLoc();
2814  Parser.Lex(); // Eat the '}'
2815
2816  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2817  return MatchOperand_Success;
2818}
2819
2820// For register list parsing, we need to map from raw GPR register numbering
2821// to the enumeration values. The enumeration values aren't sorted by
2822// register number due to our using "sp", "lr" and "pc" as canonical names.
2823static unsigned getNextRegister(unsigned Reg) {
2824  // If this is a GPR, we need to do it manually, otherwise we can rely
2825  // on the sort ordering of the enumeration since the other reg-classes
2826  // are sane.
2827  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2828    return Reg + 1;
2829  switch(Reg) {
2830  default: llvm_unreachable("Invalid GPR number!");
2831  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2832  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2833  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2834  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2835  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2836  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2837  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2838  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2839  }
2840}
2841
2842// Return the low-subreg of a given Q register.
2843static unsigned getDRegFromQReg(unsigned QReg) {
2844  switch (QReg) {
2845  default: llvm_unreachable("expected a Q register!");
2846  case ARM::Q0:  return ARM::D0;
2847  case ARM::Q1:  return ARM::D2;
2848  case ARM::Q2:  return ARM::D4;
2849  case ARM::Q3:  return ARM::D6;
2850  case ARM::Q4:  return ARM::D8;
2851  case ARM::Q5:  return ARM::D10;
2852  case ARM::Q6:  return ARM::D12;
2853  case ARM::Q7:  return ARM::D14;
2854  case ARM::Q8:  return ARM::D16;
2855  case ARM::Q9:  return ARM::D18;
2856  case ARM::Q10: return ARM::D20;
2857  case ARM::Q11: return ARM::D22;
2858  case ARM::Q12: return ARM::D24;
2859  case ARM::Q13: return ARM::D26;
2860  case ARM::Q14: return ARM::D28;
2861  case ARM::Q15: return ARM::D30;
2862  }
2863}
2864
2865/// Parse a register list.
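/// Examples: "{r0, r2-r4, lr}". A Q register such as "q1" is also accepted
/// and is expanded into its two D sub-registers (d2, d3).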
2866bool ARMAsmParser::
2867parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2868  assert(Parser.getTok().is(AsmToken::LCurly) &&
2869         "Token is not a Left Curly Brace");
2870  SMLoc S = Parser.getTok().getLoc();
2871  Parser.Lex(); // Eat '{' token.
2872  SMLoc RegLoc = Parser.getTok().getLoc();
2873
2874  // Check the first register in the list to see what register class
2875  // this is a list of.
2876  int Reg = tryParseRegister();
2877  if (Reg == -1)
2878    return Error(RegLoc, "register expected");
2879
2880  // The reglist instructions have at most 16 registers, so reserve
2881  // space for that many.
2882  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2883
2884  // Allow Q regs and just interpret them as the two D sub-registers.
2885  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2886    Reg = getDRegFromQReg(Reg);
2887    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2888    ++Reg;
2889  }
2890  const MCRegisterClass *RC;
2891  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2892    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2893  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2894    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2895  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2896    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2897  else
2898    return Error(RegLoc, "invalid register in register list");
2899
2900  // Store the register.
2901  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2902
2903  // This starts immediately after the first register token in the list,
2904  // so we can see either a comma or a minus (range separator) as a legal
2905  // next token.
2906  while (Parser.getTok().is(AsmToken::Comma) ||
2907         Parser.getTok().is(AsmToken::Minus)) {
2908    if (Parser.getTok().is(AsmToken::Minus)) {
2909      Parser.Lex(); // Eat the minus.
2910      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
2911      int EndReg = tryParseRegister();
2912      if (EndReg == -1)
2913        return Error(AfterMinusLoc, "register expected");
2914      // Allow Q regs and just interpret them as the two D sub-registers.
2915      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2916        EndReg = getDRegFromQReg(EndReg) + 1;
2917      // If the register is the same as the start reg, there's nothing
2918      // more to do.
2919      if (Reg == EndReg)
2920        continue;
2921      // The register must be in the same register class as the first.
2922      if (!RC->contains(EndReg))
2923        return Error(AfterMinusLoc, "invalid register in register list");
2924      // Ranges must go from low to high.
2925      if (MRI->getEncodingValue(Reg) > MRI->getEncodingValue(EndReg))
2926        return Error(AfterMinusLoc, "bad range in register list");
2927
2928      // Add all the registers in the range to the register list.
2929      while (Reg != EndReg) {
2930        Reg = getNextRegister(Reg);
2931        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2932      }
2933      continue;
2934    }
2935    Parser.Lex(); // Eat the comma.
2936    RegLoc = Parser.getTok().getLoc();
2937    int OldReg = Reg;
2938    const AsmToken RegTok = Parser.getTok();
2939    Reg = tryParseRegister();
2940    if (Reg == -1)
2941      return Error(RegLoc, "register expected");
2942    // Allow Q regs and just interpret them as the two D sub-registers.
2943    bool isQReg = false;
2944    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2945      Reg = getDRegFromQReg(Reg);
2946      isQReg = true;
2947    }
2948    // The register must be in the same register class as the first.
2949    if (!RC->contains(Reg))
2950      return Error(RegLoc, "invalid register in register list");
2951    // List must be monotonically increasing.
2952    if (MRI->getEncodingValue(Reg) < MRI->getEncodingValue(OldReg)) {
2953      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2954        Warning(RegLoc, "register list not in ascending order");
2955      else
2956        return Error(RegLoc, "register list not in ascending order");
2957    }
2958    if (MRI->getEncodingValue(Reg) == MRI->getEncodingValue(OldReg)) {
2959      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2960              ") in register list");
2961      continue;
2962    }
2963    // VFP register lists must also be contiguous.
2964    // It's OK to use the enumeration values directly here, as the
2965    // VFP register classes have the enum sorted properly.
2966    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2967        Reg != OldReg + 1)
2968      return Error(RegLoc, "non-contiguous register range");
2969    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2970    if (isQReg)
2971      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2972  }
2973
2974  if (Parser.getTok().isNot(AsmToken::RCurly))
2975    return Error(Parser.getTok().getLoc(), "'}' expected");
2976  SMLoc E = Parser.getTok().getEndLoc();
2977  Parser.Lex(); // Eat '}' token.
2978
2979  // Push the register list operand.
2980  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2981
2982  // The ARM system instruction variants for LDM/STM have a '^' token here.
2983  if (Parser.getTok().is(AsmToken::Caret)) {
2984    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2985    Parser.Lex(); // Eat '^' token.
2986  }
2987
2988  return false;
2989}
2990
2991// Helper function to parse the lane index for vector lists.
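// For example, "d3[1]" carries lane index 1 (IndexedLane), "d3[]" selects all
// lanes (AllLanes), and a plain "d3" has no lane suffix (NoLanes).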
2992ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2993parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index, SMLoc &EndLoc) {
2994  Index = 0; // Always return a defined index value.
2995  if (Parser.getTok().is(AsmToken::LBrac)) {
2996    Parser.Lex(); // Eat the '['.
2997    if (Parser.getTok().is(AsmToken::RBrac)) {
2998      // "Dn[]" is the 'all lanes' syntax.
2999      LaneKind = AllLanes;
3000      EndLoc = Parser.getTok().getEndLoc();
3001      Parser.Lex(); // Eat the ']'.
3002      return MatchOperand_Success;
3003    }
3004
3005    // There's an optional '#' token here. Normally there wouldn't be, but
3006    // inline assembly puts one in, and it's friendly to accept that.
3007    if (Parser.getTok().is(AsmToken::Hash))
3008      Parser.Lex(); // Eat the '#'
3009
3010    const MCExpr *LaneIndex;
3011    SMLoc Loc = Parser.getTok().getLoc();
3012    if (getParser().ParseExpression(LaneIndex)) {
3013      Error(Loc, "illegal expression");
3014      return MatchOperand_ParseFail;
3015    }
3016    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
3017    if (!CE) {
3018      Error(Loc, "lane index must be empty or an integer");
3019      return MatchOperand_ParseFail;
3020    }
3021    if (Parser.getTok().isNot(AsmToken::RBrac)) {
3022      Error(Parser.getTok().getLoc(), "']' expected");
3023      return MatchOperand_ParseFail;
3024    }
3025    EndLoc = Parser.getTok().getEndLoc();
3026    Parser.Lex(); // Eat the ']'.
3027    int64_t Val = CE->getValue();
3028
3029    // FIXME: Make this range check context sensitive for .8, .16, .32.
3030    if (Val < 0 || Val > 7) {
3031      Error(Parser.getTok().getLoc(), "lane index out of range");
3032      return MatchOperand_ParseFail;
3033    }
3034    Index = Val;
3035    LaneKind = IndexedLane;
3036    return MatchOperand_Success;
3037  }
3038  LaneKind = NoLanes;
3039  return MatchOperand_Success;
3040}
3041
3042// Parse a vector register list.
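// For illustration, this accepts forms such as "{d0, d1, d2}", the range
// syntax "{d0-d3}", and double-spaced lists like "{d0, d2}". A bare "d0" or
// "q0" (without braces) is also taken as a one- or two-register list, and a
// lane suffix may follow each register, e.g. "{d0[], d1[]}" or
// "{d0[1], d1[1]}".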
3043ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3044parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3045  VectorLaneTy LaneKind;
3046  unsigned LaneIndex;
3047  SMLoc S = Parser.getTok().getLoc();
3048  // As an extension (to match gas), support a plain D register or Q register
3049  // (without enclosing curly braces) as a single or double entry list,
3050  // respectively.
3051  if (Parser.getTok().is(AsmToken::Identifier)) {
3052    SMLoc E = Parser.getTok().getEndLoc();
3053    int Reg = tryParseRegister();
3054    if (Reg == -1)
3055      return MatchOperand_NoMatch;
3056    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3057      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3058      if (Res != MatchOperand_Success)
3059        return Res;
3060      switch (LaneKind) {
3061      case NoLanes:
3062        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3063        break;
3064      case AllLanes:
3065        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3066                                                                S, E));
3067        break;
3068      case IndexedLane:
3069        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3070                                                               LaneIndex,
3071                                                               false, S, E));
3072        break;
3073      }
3074      return MatchOperand_Success;
3075    }
3076    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3077      Reg = getDRegFromQReg(Reg);
3078      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex, E);
3079      if (Res != MatchOperand_Success)
3080        return Res;
3081      switch (LaneKind) {
3082      case NoLanes:
3083        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3084                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3085        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3086        break;
3087      case AllLanes:
3088        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3089                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3090        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3091                                                                S, E));
3092        break;
3093      case IndexedLane:
3094        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3095                                                               LaneIndex,
3096                                                               false, S, E));
3097        break;
3098      }
3099      return MatchOperand_Success;
3100    }
3101    Error(S, "vector register expected");
3102    return MatchOperand_ParseFail;
3103  }
3104
3105  if (Parser.getTok().isNot(AsmToken::LCurly))
3106    return MatchOperand_NoMatch;
3107
3108  Parser.Lex(); // Eat '{' token.
3109  SMLoc RegLoc = Parser.getTok().getLoc();
3110
3111  int Reg = tryParseRegister();
3112  if (Reg == -1) {
3113    Error(RegLoc, "register expected");
3114    return MatchOperand_ParseFail;
3115  }
3116  unsigned Count = 1;
3117  int Spacing = 0;
3118  unsigned FirstReg = Reg;
3119  // The list is of D registers, but we also allow Q regs and just interpret
3120  // them as the two D sub-registers.
3121  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3122    FirstReg = Reg = getDRegFromQReg(Reg);
3123    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3124                 // it's ambiguous with four-register single spaced.
3125    ++Reg;
3126    ++Count;
3127  }
3128
3129  SMLoc E;
3130  if (parseVectorLane(LaneKind, LaneIndex, E) != MatchOperand_Success)
3131    return MatchOperand_ParseFail;
3132
3133  while (Parser.getTok().is(AsmToken::Comma) ||
3134         Parser.getTok().is(AsmToken::Minus)) {
3135    if (Parser.getTok().is(AsmToken::Minus)) {
3136      if (!Spacing)
3137        Spacing = 1; // Register range implies a single spaced list.
3138      else if (Spacing == 2) {
3139        Error(Parser.getTok().getLoc(),
3140              "sequential registers in double spaced list");
3141        return MatchOperand_ParseFail;
3142      }
3143      Parser.Lex(); // Eat the minus.
3144      SMLoc AfterMinusLoc = Parser.getTok().getLoc();
3145      int EndReg = tryParseRegister();
3146      if (EndReg == -1) {
3147        Error(AfterMinusLoc, "register expected");
3148        return MatchOperand_ParseFail;
3149      }
3150      // Allow Q regs and just interpret them as the two D sub-registers.
3151      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3152        EndReg = getDRegFromQReg(EndReg) + 1;
3153      // If the register is the same as the start reg, there's nothing
3154      // more to do.
3155      if (Reg == EndReg)
3156        continue;
3157      // The register must be in the same register class as the first.
3158      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3159        Error(AfterMinusLoc, "invalid register in register list");
3160        return MatchOperand_ParseFail;
3161      }
3162      // Ranges must go from low to high.
3163      if (Reg > EndReg) {
3164        Error(AfterMinusLoc, "bad range in register list");
3165        return MatchOperand_ParseFail;
3166      }
3167      // Parse the lane specifier if present.
3168      VectorLaneTy NextLaneKind;
3169      unsigned NextLaneIndex;
3170      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3171          MatchOperand_Success)
3172        return MatchOperand_ParseFail;
3173      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3174        Error(AfterMinusLoc, "mismatched lane index in register list");
3175        return MatchOperand_ParseFail;
3176      }
3177
3178      // Add all the registers in the range to the register list.
3179      Count += EndReg - Reg;
3180      Reg = EndReg;
3181      continue;
3182    }
3183    Parser.Lex(); // Eat the comma.
3184    RegLoc = Parser.getTok().getLoc();
3185    int OldReg = Reg;
3186    Reg = tryParseRegister();
3187    if (Reg == -1) {
3188      Error(RegLoc, "register expected");
3189      return MatchOperand_ParseFail;
3190    }
3191    // Vector register lists must be contiguous.
3192    // It's OK to use the enumeration values directly here, as the
3193    // VFP register classes have the enum sorted properly.
3194    //
3195    // The list is of D registers, but we also allow Q regs and just interpret
3196    // them as the two D sub-registers.
3197    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3198      if (!Spacing)
3199        Spacing = 1; // Register range implies a single spaced list.
3200      else if (Spacing == 2) {
3201        Error(RegLoc,
3202              "invalid register in double-spaced list (must be 'D' register)");
3203        return MatchOperand_ParseFail;
3204      }
3205      Reg = getDRegFromQReg(Reg);
3206      if (Reg != OldReg + 1) {
3207        Error(RegLoc, "non-contiguous register range");
3208        return MatchOperand_ParseFail;
3209      }
3210      ++Reg;
3211      Count += 2;
3212      // Parse the lane specifier if present.
3213      VectorLaneTy NextLaneKind;
3214      unsigned NextLaneIndex;
3215      SMLoc LaneLoc = Parser.getTok().getLoc();
3216      if (parseVectorLane(NextLaneKind, NextLaneIndex, E) !=
3217          MatchOperand_Success)
3218        return MatchOperand_ParseFail;
3219      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3220        Error(LaneLoc, "mismatched lane index in register list");
3221        return MatchOperand_ParseFail;
3222      }
3223      continue;
3224    }
3225    // Normal D register.
3226    // Figure out the register spacing (single or double) of the list if
3227    // we don't know it already.
3228    if (!Spacing)
3229      Spacing = 1 + (Reg == OldReg + 2);
3230
3231    // Just check that it's contiguous and keep going.
3232    if (Reg != OldReg + Spacing) {
3233      Error(RegLoc, "non-contiguous register range");
3234      return MatchOperand_ParseFail;
3235    }
3236    ++Count;
3237    // Parse the lane specifier if present.
3238    VectorLaneTy NextLaneKind;
3239    unsigned NextLaneIndex;
3240    SMLoc EndLoc = Parser.getTok().getLoc();
3241    if (parseVectorLane(NextLaneKind, NextLaneIndex, E) != MatchOperand_Success)
3242      return MatchOperand_ParseFail;
3243    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3244      Error(EndLoc, "mismatched lane index in register list");
3245      return MatchOperand_ParseFail;
3246    }
3247  }
3248
3249  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3250    Error(Parser.getTok().getLoc(), "'}' expected");
3251    return MatchOperand_ParseFail;
3252  }
3253  E = Parser.getTok().getEndLoc();
3254  Parser.Lex(); // Eat '}' token.
3255
3256  switch (LaneKind) {
3257  case NoLanes:
3258    // Two-register operands are converted to the
3259    // composite register classes.
3260    if (Count == 2) {
3261      const MCRegisterClass *RC = (Spacing == 1) ?
3262        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3263        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3264      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3265    }
3266
3267    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3268                                                    (Spacing == 2), S, E));
3269    break;
3270  case AllLanes:
3271    // Two-register operands are converted to the
3272    // composite register classes.
3273    if (Count == 2) {
3274      const MCRegisterClass *RC = (Spacing == 1) ?
3275        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3276        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3277      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3278    }
3279    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3280                                                            (Spacing == 2),
3281                                                            S, E));
3282    break;
3283  case IndexedLane:
3284    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3285                                                           LaneIndex,
3286                                                           (Spacing == 2),
3287                                                           S, E));
3288    break;
3289  }
3290  return MatchOperand_Success;
3291}
3292
3293/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
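/// For illustration, this accepts either a named option, as in "dmb ish" or
/// "dsb sy", or a raw 4-bit immediate such as "dmb #0xb".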
3294ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3295parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3296  SMLoc S = Parser.getTok().getLoc();
3297  const AsmToken &Tok = Parser.getTok();
3298  unsigned Opt;
3299
3300  if (Tok.is(AsmToken::Identifier)) {
3301    StringRef OptStr = Tok.getString();
3302
3303    Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()).lower())
3304      .Case("sy",    ARM_MB::SY)
3305      .Case("st",    ARM_MB::ST)
3306      .Case("sh",    ARM_MB::ISH)
3307      .Case("ish",   ARM_MB::ISH)
3308      .Case("shst",  ARM_MB::ISHST)
3309      .Case("ishst", ARM_MB::ISHST)
3310      .Case("nsh",   ARM_MB::NSH)
3311      .Case("un",    ARM_MB::NSH)
3312      .Case("nshst", ARM_MB::NSHST)
3313      .Case("unst",  ARM_MB::NSHST)
3314      .Case("osh",   ARM_MB::OSH)
3315      .Case("oshst", ARM_MB::OSHST)
3316      .Default(~0U);
3317
3318    if (Opt == ~0U)
3319      return MatchOperand_NoMatch;
3320
3321    Parser.Lex(); // Eat identifier token.
3322  } else if (Tok.is(AsmToken::Hash) ||
3323             Tok.is(AsmToken::Dollar) ||
3324             Tok.is(AsmToken::Integer)) {
3325    if (Parser.getTok().isNot(AsmToken::Integer))
3326      Parser.Lex(); // Eat the '#'.
3327    SMLoc Loc = Parser.getTok().getLoc();
3328
3329    const MCExpr *MemBarrierID;
3330    if (getParser().ParseExpression(MemBarrierID)) {
3331      Error(Loc, "illegal expression");
3332      return MatchOperand_ParseFail;
3333    }
3334
3335    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(MemBarrierID);
3336    if (!CE) {
3337      Error(Loc, "constant expression expected");
3338      return MatchOperand_ParseFail;
3339    }
3340
3341    int Val = CE->getValue();
3342    if (Val & ~0xf) {
3343      Error(Loc, "immediate value out of range");
3344      return MatchOperand_ParseFail;
3345    }
3346
3347    Opt = ARM_MB::RESERVED_0 + Val;
3348  } else
3349    return MatchOperand_ParseFail;
3350
3351  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3352  return MatchOperand_Success;
3353}
3354
3355/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
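/// The flags are any combination of the letters 'a', 'i' and 'f', each at
/// most once, or the word "none", e.g. the "if" in "cpsid if" or "cpsie none".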
3356ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3357parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3358  SMLoc S = Parser.getTok().getLoc();
3359  const AsmToken &Tok = Parser.getTok();
3360  if (!Tok.is(AsmToken::Identifier))
3361    return MatchOperand_NoMatch;
3362  StringRef IFlagsStr = Tok.getString();
3363
3364  // An iflags string of "none" is interpreted to mean that none of the AIF
3365  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3366  unsigned IFlags = 0;
3367  if (IFlagsStr != "none") {
3368    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3369      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3370        .Case("a", ARM_PROC::A)
3371        .Case("i", ARM_PROC::I)
3372        .Case("f", ARM_PROC::F)
3373        .Default(~0U);
3374
3375      // If some specific iflag is already set, it means that some letter is
3376      // present more than once, which is not acceptable.
3377      if (Flag == ~0U || (IFlags & Flag))
3378        return MatchOperand_NoMatch;
3379
3380      IFlags |= Flag;
3381    }
3382  }
3383
3384  Parser.Lex(); // Eat identifier token.
3385  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3386  return MatchOperand_Success;
3387}
3388
3389/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
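/// For illustration, this handles "apsr_nzcvq" in "msr apsr_nzcvq, r0" on
/// M-class cores, and "cpsr_fc" or "spsr_fsxc" on A/R-class cores; the full
/// set of accepted names is given by the switches below.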
3390ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3391parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3392  SMLoc S = Parser.getTok().getLoc();
3393  const AsmToken &Tok = Parser.getTok();
3394  if (!Tok.is(AsmToken::Identifier))
3395    return MatchOperand_NoMatch;
3396  StringRef Mask = Tok.getString();
3397
3398  if (isMClass()) {
3399    // See ARMv6-M 10.1.1
3400    std::string Name = Mask.lower();
3401    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3402      // Note: in the documentation:
3403      //  ARM deprecates using MSR APSR without a _<bits> qualifier as an alias
3404      //  for MSR APSR_nzcvq.
3405      // but we do make it an alias here.  This is so we get the "mask encoding"
3406      // bits correct on MSR APSR writes.
3407      //
3408      // FIXME: Note the 0xc00 "mask encoding" bits version of the registers
3409      // should really only be allowed when writing a special register.  Note
3410      // they get dropped in the MRS instruction reading a special register as
3411      // the SYSm field is only 8 bits.
3412      //
3413      // FIXME: the _g and _nzcvqg versions are only allowed if the processor
3414      // includes the DSP extension but that is not checked.
3415      .Case("apsr", 0x800)
3416      .Case("apsr_nzcvq", 0x800)
3417      .Case("apsr_g", 0x400)
3418      .Case("apsr_nzcvqg", 0xc00)
3419      .Case("iapsr", 0x801)
3420      .Case("iapsr_nzcvq", 0x801)
3421      .Case("iapsr_g", 0x401)
3422      .Case("iapsr_nzcvqg", 0xc01)
3423      .Case("eapsr", 0x802)
3424      .Case("eapsr_nzcvq", 0x802)
3425      .Case("eapsr_g", 0x402)
3426      .Case("eapsr_nzcvqg", 0xc02)
3427      .Case("xpsr", 0x803)
3428      .Case("xpsr_nzcvq", 0x803)
3429      .Case("xpsr_g", 0x403)
3430      .Case("xpsr_nzcvqg", 0xc03)
3431      .Case("ipsr", 0x805)
3432      .Case("epsr", 0x806)
3433      .Case("iepsr", 0x807)
3434      .Case("msp", 0x808)
3435      .Case("psp", 0x809)
3436      .Case("primask", 0x810)
3437      .Case("basepri", 0x811)
3438      .Case("basepri_max", 0x812)
3439      .Case("faultmask", 0x813)
3440      .Case("control", 0x814)
3441      .Default(~0U);
3442
3443    if (FlagsVal == ~0U)
3444      return MatchOperand_NoMatch;
3445
3446    if (!hasV7Ops() && FlagsVal >= 0x811 && FlagsVal <= 0x813)
3447      // basepri, basepri_max and faultmask only valid for V7m.
3448      return MatchOperand_NoMatch;
3449
3450    Parser.Lex(); // Eat identifier token.
3451    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3452    return MatchOperand_Success;
3453  }
3454
3455  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3456  size_t Start = 0, Next = Mask.find('_');
3457  StringRef Flags = "";
3458  std::string SpecReg = Mask.slice(Start, Next).lower();
3459  if (Next != StringRef::npos)
3460    Flags = Mask.slice(Next+1, Mask.size());
3461
3462  // FlagsVal contains the complete mask:
3463  // 3-0: Mask
3464  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3465  unsigned FlagsVal = 0;
3466
3467  if (SpecReg == "apsr") {
3468    FlagsVal = StringSwitch<unsigned>(Flags)
3469    .Case("nzcvq",  0x8) // same as CPSR_f
3470    .Case("g",      0x4) // same as CPSR_s
3471    .Case("nzcvqg", 0xc) // same as CPSR_fs
3472    .Default(~0U);
3473
3474    if (FlagsVal == ~0U) {
3475      if (!Flags.empty())
3476        return MatchOperand_NoMatch;
3477      else
3478        FlagsVal = 8; // No flag
3479    }
3480  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3481    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3482    if (Flags == "all" || Flags == "")
3483      Flags = "fc";
3484    for (int i = 0, e = Flags.size(); i != e; ++i) {
3485      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3486      .Case("c", 1)
3487      .Case("x", 2)
3488      .Case("s", 4)
3489      .Case("f", 8)
3490      .Default(~0U);
3491
3492      // If some specific flag is already set, it means that some letter is
3493      // present more than once, which is not acceptable.
3494      if (FlagsVal == ~0U || (FlagsVal & Flag))
3495        return MatchOperand_NoMatch;
3496      FlagsVal |= Flag;
3497    }
3498  } else // No match for special register.
3499    return MatchOperand_NoMatch;
3500
3501  // Special register without flags is NOT equivalent to "fc" flags.
3502  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3503  // two lines would enable gas compatibility at the expense of breaking
3504  // round-tripping.
3505  //
3506  // if (!FlagsVal)
3507  //  FlagsVal = 0x9;
3508
3509  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3510  if (SpecReg == "spsr")
3511    FlagsVal |= 16;
3512
3513  Parser.Lex(); // Eat identifier token.
3514  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3515  return MatchOperand_Success;
3516}
3517
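/// parsePKHImm - Parse the shift operand of the PKH instructions, i.e. an
/// "lsl #n" or "asr #n" with 'n' constrained to [Low, High] by the caller.
/// For illustration, this parses the "lsl #8" in "pkhbt r0, r1, r2, lsl #8".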
3518ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3519parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3520            int Low, int High) {
3521  const AsmToken &Tok = Parser.getTok();
3522  if (Tok.isNot(AsmToken::Identifier)) {
3523    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3524    return MatchOperand_ParseFail;
3525  }
3526  StringRef ShiftName = Tok.getString();
3527  std::string LowerOp = Op.lower();
3528  std::string UpperOp = Op.upper();
3529  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3530    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3531    return MatchOperand_ParseFail;
3532  }
3533  Parser.Lex(); // Eat shift type token.
3534
3535  // There must be a '#' and a shift amount.
3536  if (Parser.getTok().isNot(AsmToken::Hash) &&
3537      Parser.getTok().isNot(AsmToken::Dollar)) {
3538    Error(Parser.getTok().getLoc(), "'#' expected");
3539    return MatchOperand_ParseFail;
3540  }
3541  Parser.Lex(); // Eat hash token.
3542
3543  const MCExpr *ShiftAmount;
3544  SMLoc Loc = Parser.getTok().getLoc();
3545  SMLoc EndLoc;
3546  if (getParser().ParseExpression(ShiftAmount, EndLoc)) {
3547    Error(Loc, "illegal expression");
3548    return MatchOperand_ParseFail;
3549  }
3550  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3551  if (!CE) {
3552    Error(Loc, "constant expression expected");
3553    return MatchOperand_ParseFail;
3554  }
3555  int Val = CE->getValue();
3556  if (Val < Low || Val > High) {
3557    Error(Loc, "immediate value out of range");
3558    return MatchOperand_ParseFail;
3559  }
3560
3561  Operands.push_back(ARMOperand::CreateImm(CE, Loc, EndLoc));
3562
3563  return MatchOperand_Success;
3564}
3565
3566ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3567parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3568  const AsmToken &Tok = Parser.getTok();
3569  SMLoc S = Tok.getLoc();
3570  if (Tok.isNot(AsmToken::Identifier)) {
3571    Error(S, "'be' or 'le' operand expected");
3572    return MatchOperand_ParseFail;
3573  }
3574  int Val = StringSwitch<int>(Tok.getString())
3575    .Case("be", 1)
3576    .Case("le", 0)
3577    .Default(-1);
3578  Parser.Lex(); // Eat the token.
3579
3580  if (Val == -1) {
3581    Error(S, "'be' or 'le' operand expected");
3582    return MatchOperand_ParseFail;
3583  }
3584  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3585                                                                  getContext()),
3586                                           S, Tok.getEndLoc()));
3587  return MatchOperand_Success;
3588}
3589
3590/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3591/// instructions. Legal values are:
3592///     lsl #n  'n' in [0,31]
3593///     asr #n  'n' in [1,32]
3594///             n == 32 encoded as n == 0.
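/// For illustration, this parses the trailing "lsl #4" in
/// "ssat r0, #8, r1, lsl #4".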
3595ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3596parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3597  const AsmToken &Tok = Parser.getTok();
3598  SMLoc S = Tok.getLoc();
3599  if (Tok.isNot(AsmToken::Identifier)) {
3600    Error(S, "shift operator 'asr' or 'lsl' expected");
3601    return MatchOperand_ParseFail;
3602  }
3603  StringRef ShiftName = Tok.getString();
3604  bool isASR;
3605  if (ShiftName == "lsl" || ShiftName == "LSL")
3606    isASR = false;
3607  else if (ShiftName == "asr" || ShiftName == "ASR")
3608    isASR = true;
3609  else {
3610    Error(S, "shift operator 'asr' or 'lsl' expected");
3611    return MatchOperand_ParseFail;
3612  }
3613  Parser.Lex(); // Eat the operator.
3614
3615  // A '#' and a shift amount.
3616  if (Parser.getTok().isNot(AsmToken::Hash) &&
3617      Parser.getTok().isNot(AsmToken::Dollar)) {
3618    Error(Parser.getTok().getLoc(), "'#' expected");
3619    return MatchOperand_ParseFail;
3620  }
3621  Parser.Lex(); // Eat hash token.
3622  SMLoc ExLoc = Parser.getTok().getLoc();
3623
3624  const MCExpr *ShiftAmount;
3625  SMLoc EndLoc;
3626  if (getParser().ParseExpression(ShiftAmount, EndLoc)) {
3627    Error(ExLoc, "malformed shift expression");
3628    return MatchOperand_ParseFail;
3629  }
3630  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3631  if (!CE) {
3632    Error(ExLoc, "shift amount must be an immediate");
3633    return MatchOperand_ParseFail;
3634  }
3635
3636  int64_t Val = CE->getValue();
3637  if (isASR) {
3638    // Shift amount must be in [1,32]
3639    if (Val < 1 || Val > 32) {
3640      Error(ExLoc, "'asr' shift amount must be in range [1,32]");
3641      return MatchOperand_ParseFail;
3642    }
3643    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3644    if (isThumb() && Val == 32) {
3645      Error(ExLoc, "'asr #32' shift amount not allowed in Thumb mode");
3646      return MatchOperand_ParseFail;
3647    }
3648    if (Val == 32) Val = 0;
3649  } else {
3650    // Shift amount must be in [0,31]
3651    if (Val < 0 || Val > 31) {
3652      Error(ExLoc, "'lsl' shift amount must be in range [0,31]");
3653      return MatchOperand_ParseFail;
3654    }
3655  }
3656
3657  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, EndLoc));
3658
3659  return MatchOperand_Success;
3660}
3661
3662/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3663/// of instructions. Legal values are:
3664///     ror #n  'n' in {0, 8, 16, 24}
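/// For illustration, this parses the "ror #16" in "sxtb r0, r1, ror #16";
/// a rotation of zero is normally expressed by omitting the operand entirely.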
3665ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3666parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3667  const AsmToken &Tok = Parser.getTok();
3668  SMLoc S = Tok.getLoc();
3669  if (Tok.isNot(AsmToken::Identifier))
3670    return MatchOperand_NoMatch;
3671  StringRef ShiftName = Tok.getString();
3672  if (ShiftName != "ror" && ShiftName != "ROR")
3673    return MatchOperand_NoMatch;
3674  Parser.Lex(); // Eat the operator.
3675
3676  // A '#' and a rotate amount.
3677  if (Parser.getTok().isNot(AsmToken::Hash) &&
3678      Parser.getTok().isNot(AsmToken::Dollar)) {
3679    Error(Parser.getTok().getLoc(), "'#' expected");
3680    return MatchOperand_ParseFail;
3681  }
3682  Parser.Lex(); // Eat hash token.
3683  SMLoc ExLoc = Parser.getTok().getLoc();
3684
3685  const MCExpr *ShiftAmount;
3686  SMLoc EndLoc;
3687  if (getParser().ParseExpression(ShiftAmount, EndLoc)) {
3688    Error(ExLoc, "malformed rotate expression");
3689    return MatchOperand_ParseFail;
3690  }
3691  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3692  if (!CE) {
3693    Error(ExLoc, "rotate amount must be an immediate");
3694    return MatchOperand_ParseFail;
3695  }
3696
3697  int64_t Val = CE->getValue();
3698  // Rotate amount must be in {0, 8, 16, 24}; 0 is an undocumented extension:
3699  // normally, zero is represented in asm by omitting the rotate operand
3700  // entirely.
3701  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3702    Error(ExLoc, "'ror' rotate amount must be 8, 16, or 24");
3703    return MatchOperand_ParseFail;
3704  }
3705
3706  Operands.push_back(ARMOperand::CreateRotImm(Val, S, EndLoc));
3707
3708  return MatchOperand_Success;
3709}
3710
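/// parseBitfield - Parse the "#lsb, #width" operand pair of the bitfield
/// instructions (BFC/BFI and friends), e.g. the "#4, #8" in
/// "bfi r0, r1, #4, #8" (illustrative; range checks follow below).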
3711ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3712parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3713  SMLoc S = Parser.getTok().getLoc();
3714  // The bitfield descriptor is really two operands, the LSB and the width.
3715  if (Parser.getTok().isNot(AsmToken::Hash) &&
3716      Parser.getTok().isNot(AsmToken::Dollar)) {
3717    Error(Parser.getTok().getLoc(), "'#' expected");
3718    return MatchOperand_ParseFail;
3719  }
3720  Parser.Lex(); // Eat hash token.
3721
3722  const MCExpr *LSBExpr;
3723  SMLoc E = Parser.getTok().getLoc();
3724  if (getParser().ParseExpression(LSBExpr)) {
3725    Error(E, "malformed immediate expression");
3726    return MatchOperand_ParseFail;
3727  }
3728  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3729  if (!CE) {
3730    Error(E, "'lsb' operand must be an immediate");
3731    return MatchOperand_ParseFail;
3732  }
3733
3734  int64_t LSB = CE->getValue();
3735  // The LSB must be in the range [0,31]
3736  if (LSB < 0 || LSB > 31) {
3737    Error(E, "'lsb' operand must be in the range [0,31]");
3738    return MatchOperand_ParseFail;
3739  }
3740  E = Parser.getTok().getLoc();
3741
3742  // Expect another immediate operand.
3743  if (Parser.getTok().isNot(AsmToken::Comma)) {
3744    Error(Parser.getTok().getLoc(), "too few operands");
3745    return MatchOperand_ParseFail;
3746  }
3747  Parser.Lex(); // Eat the comma.
3748  if (Parser.getTok().isNot(AsmToken::Hash) &&
3749      Parser.getTok().isNot(AsmToken::Dollar)) {
3750    Error(Parser.getTok().getLoc(), "'#' expected");
3751    return MatchOperand_ParseFail;
3752  }
3753  Parser.Lex(); // Eat hash token.
3754
3755  const MCExpr *WidthExpr;
3756  SMLoc EndLoc;
3757  if (getParser().ParseExpression(WidthExpr, EndLoc)) {
3758    Error(E, "malformed immediate expression");
3759    return MatchOperand_ParseFail;
3760  }
3761  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3762  if (!CE) {
3763    Error(E, "'width' operand must be an immediate");
3764    return MatchOperand_ParseFail;
3765  }
3766
3767  int64_t Width = CE->getValue();
3768  // The width must be in the range [1,32-lsb]
3769  if (Width < 1 || Width > 32 - LSB) {
3770    Error(E, "'width' operand must be in the range [1,32-lsb]");
3771    return MatchOperand_ParseFail;
3772  }
3773
3774  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, EndLoc));
3775
3776  return MatchOperand_Success;
3777}
3778
3779ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3780parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3781  // Check for a post-index addressing register operand. Specifically:
3782  // postidx_reg := '+' register {, shift}
3783  //              | '-' register {, shift}
3784  //              | register {, shift}
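  // For illustration, the "r2, lsl #2" in "ldr r0, [r1], r2, lsl #2" and the
  // "-r3" in "strb r4, [r5], -r3" are parsed as postidx_reg operands.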
3785
3786  // This method must return MatchOperand_NoMatch without consuming any tokens
3787  // in the case where there is no match, as other alternatives take other
3788  // parse methods.
3789  AsmToken Tok = Parser.getTok();
3790  SMLoc S = Tok.getLoc();
3791  bool haveEaten = false;
3792  bool isAdd = true;
3793  if (Tok.is(AsmToken::Plus)) {
3794    Parser.Lex(); // Eat the '+' token.
3795    haveEaten = true;
3796  } else if (Tok.is(AsmToken::Minus)) {
3797    Parser.Lex(); // Eat the '-' token.
3798    isAdd = false;
3799    haveEaten = true;
3800  }
3801
3802  SMLoc E = Parser.getTok().getEndLoc();
3803  int Reg = tryParseRegister();
3804  if (Reg == -1) {
3805    if (!haveEaten)
3806      return MatchOperand_NoMatch;
3807    Error(Parser.getTok().getLoc(), "register expected");
3808    return MatchOperand_ParseFail;
3809  }
3810
3811  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3812  unsigned ShiftImm = 0;
3813  if (Parser.getTok().is(AsmToken::Comma)) {
3814    Parser.Lex(); // Eat the ','.
3815    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3816      return MatchOperand_ParseFail;
3817
3818    // FIXME: Only approximates end...may include intervening whitespace.
3819    E = Parser.getTok().getLoc();
3820  }
3821
3822  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3823                                                  ShiftImm, S, E));
3824
3825  return MatchOperand_Success;
3826}
3827
3828ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3829parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3830  // Check for a post-index addressing register operand. Specifically:
3831  // am3offset := '+' register
3832  //              | '-' register
3833  //              | register
3834  //              | # imm
3835  //              | # + imm
3836  //              | # - imm
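  // For illustration, the "#8" in "ldrd r0, r1, [r2], #8" and the "-r3" in
  // "ldrh r0, [r1], -r3" are am3offset operands.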
3837
3838  // This method must return MatchOperand_NoMatch without consuming any tokens
3839  // in the case where there is no match, as other alternatives take other
3840  // parse methods.
3841  AsmToken Tok = Parser.getTok();
3842  SMLoc S = Tok.getLoc();
3843
3844  // Do immediates first, as we always parse those if we have a '#'.
3845  if (Parser.getTok().is(AsmToken::Hash) ||
3846      Parser.getTok().is(AsmToken::Dollar)) {
3847    Parser.Lex(); // Eat the '#'.
3848    // Explicitly look for a '-', as we need to encode negative zero
3849    // differently.
3850    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3851    const MCExpr *Offset;
3852    SMLoc E;
3853    if (getParser().ParseExpression(Offset, E))
3854      return MatchOperand_ParseFail;
3855    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3856    if (!CE) {
3857      Error(S, "constant expression expected");
3858      return MatchOperand_ParseFail;
3859    }
3860    // Negative zero is encoded as the flag value INT32_MIN.
3861    int32_t Val = CE->getValue();
3862    if (isNegative && Val == 0)
3863      Val = INT32_MIN;
3864
3865    Operands.push_back(
3866      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3867
3868    return MatchOperand_Success;
3869  }
3870
3871
3872  bool haveEaten = false;
3873  bool isAdd = true;
3874  if (Tok.is(AsmToken::Plus)) {
3875    Parser.Lex(); // Eat the '+' token.
3876    haveEaten = true;
3877  } else if (Tok.is(AsmToken::Minus)) {
3878    Parser.Lex(); // Eat the '-' token.
3879    isAdd = false;
3880    haveEaten = true;
3881  }
3882
3883  Tok = Parser.getTok();
3884  int Reg = tryParseRegister();
3885  if (Reg == -1) {
3886    if (!haveEaten)
3887      return MatchOperand_NoMatch;
3888    Error(Tok.getLoc(), "register expected");
3889    return MatchOperand_ParseFail;
3890  }
3891
3892  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3893                                                  0, S, Tok.getEndLoc()));
3894
3895  return MatchOperand_Success;
3896}
3897
3898/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3899/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3900/// when they refer to multiple MIOperands inside a single one.
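/// For illustration, given "ldrd r0, r1, [r2, #8]!", Operands[2] and
/// Operands[3] are Rt and Rt2 and Operands[4] is the address; a dummy
/// placeholder stands in for the writeback register.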
3901void ARMAsmParser::
3902cvtT2LdrdPre(MCInst &Inst,
3903             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3904  // Rt, Rt2
3905  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3906  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3907  // Create a writeback register dummy placeholder.
3908  Inst.addOperand(MCOperand::CreateReg(0));
3909  // addr
3910  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3911  // pred
3912  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3913}
3914
3915/// cvtT2StrdPre - Convert parsed operands to MCInst.
3916/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3917/// when they refer to multiple MIOperands inside a single one.
3918void ARMAsmParser::
3919cvtT2StrdPre(MCInst &Inst,
3920             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3921  // Create a writeback register dummy placeholder.
3922  Inst.addOperand(MCOperand::CreateReg(0));
3923  // Rt, Rt2
3924  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3925  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3926  // addr
3927  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3928  // pred
3929  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3930}
3931
3932/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3933/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3934/// when they refer to multiple MIOperands inside a single one.
3935void ARMAsmParser::
3936cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst,
3937                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3938  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3939
3940  // Create a writeback register dummy placeholder.
3941  Inst.addOperand(MCOperand::CreateImm(0));
3942
3943  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3944  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3945}
3946
3947/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3948/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3949/// when they refer to multiple MIOperands inside a single one.
3950void ARMAsmParser::
3951cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst,
3952                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3953  // Create a writeback register dummy placeholder.
3954  Inst.addOperand(MCOperand::CreateImm(0));
3955  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3956  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3957  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3958}
3959
3960/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3961/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3962/// when they refer to multiple MIOperands inside a single one.
3963void ARMAsmParser::
3964cvtLdWriteBackRegAddrMode2(MCInst &Inst,
3965                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3966  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3967
3968  // Create a writeback register dummy placeholder.
3969  Inst.addOperand(MCOperand::CreateImm(0));
3970
3971  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3972  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3973}
3974
3975/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3976/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3977/// when they refer to multiple MIOperands inside a single one.
3978void ARMAsmParser::
3979cvtLdWriteBackRegAddrModeImm12(MCInst &Inst,
3980                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3981  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3982
3983  // Create a writeback register dummy placeholder.
3984  Inst.addOperand(MCOperand::CreateImm(0));
3985
3986  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3987  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3988}
3989
3990
3991/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3992/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3993/// when they refer to multiple MIOperands inside a single one.
3994void ARMAsmParser::
3995cvtStWriteBackRegAddrModeImm12(MCInst &Inst,
3996                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3997  // Create a writeback register dummy placeholder.
3998  Inst.addOperand(MCOperand::CreateImm(0));
3999  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4000  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
4001  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4002}
4003
4004/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
4005/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4006/// when they refer to multiple MIOperands inside a single one.
4007void ARMAsmParser::
4008cvtStWriteBackRegAddrMode2(MCInst &Inst,
4009                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4010  // Create a writeback register dummy placeholder.
4011  Inst.addOperand(MCOperand::CreateImm(0));
4012  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4013  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
4014  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4015}
4016
4017/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4018/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4019/// when they refer to multiple MIOperands inside a single one.
4020void ARMAsmParser::
4021cvtStWriteBackRegAddrMode3(MCInst &Inst,
4022                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4023  // Create a writeback register dummy placeholder.
4024  Inst.addOperand(MCOperand::CreateImm(0));
4025  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4026  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4027  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4028}
4029
4030/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
4031/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4032/// when they refer to multiple MIOperands inside a single one.
4033void ARMAsmParser::
4034cvtLdExtTWriteBackImm(MCInst &Inst,
4035                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4036  // Rt
4037  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4038  // Create a writeback register dummy placeholder.
4039  Inst.addOperand(MCOperand::CreateImm(0));
4040  // addr
4041  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4042  // offset
4043  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
4044  // pred
4045  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4046}
4047
4048/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
4049/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4050/// when they refer to multiple MIOperands inside a single one.
4051void ARMAsmParser::
4052cvtLdExtTWriteBackReg(MCInst &Inst,
4053                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4054  // Rt
4055  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4056  // Create a writeback register dummy placeholder.
4057  Inst.addOperand(MCOperand::CreateImm(0));
4058  // addr
4059  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4060  // offset
4061  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4062  // pred
4063  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4064}
4065
4066/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
4067/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4068/// when they refer to multiple MIOperands inside a single one.
4069void ARMAsmParser::
4070cvtStExtTWriteBackImm(MCInst &Inst,
4071                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4072  // Create a writeback register dummy placeholder.
4073  Inst.addOperand(MCOperand::CreateImm(0));
4074  // Rt
4075  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4076  // addr
4077  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4078  // offset
4079  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
4080  // pred
4081  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4082}
4083
4084/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
4085/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4086/// when they refer to multiple MIOperands inside a single one.
4087void ARMAsmParser::
4088cvtStExtTWriteBackReg(MCInst &Inst,
4089                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4090  // Create a writeback register dummy placeholder.
4091  Inst.addOperand(MCOperand::CreateImm(0));
4092  // Rt
4093  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4094  // addr
4095  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
4096  // offset
4097  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
4098  // pred
4099  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4100}
4101
4102/// cvtLdrdPre - Convert parsed operands to MCInst.
4103/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4104/// when they refer to multiple MIOperands inside a single one.
4105void ARMAsmParser::
4106cvtLdrdPre(MCInst &Inst,
4107           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4108  // Rt, Rt2
4109  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4110  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4111  // Create a writeback register dummy placeholder.
4112  Inst.addOperand(MCOperand::CreateImm(0));
4113  // addr
4114  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4115  // pred
4116  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4117}
4118
4119/// cvtStrdPre - Convert parsed operands to MCInst.
4120/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4121/// when they refer to multiple MIOperands inside a single one.
4122void ARMAsmParser::
4123cvtStrdPre(MCInst &Inst,
4124           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4125  // Create a writeback register dummy placeholder.
4126  Inst.addOperand(MCOperand::CreateImm(0));
4127  // Rt, Rt2
4128  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4129  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4130  // addr
4131  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4132  // pred
4133  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4134}
4135
4136/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4137/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4138/// when they refer to multiple MIOperands inside a single one.
4139void ARMAsmParser::
4140cvtLdWriteBackRegAddrMode3(MCInst &Inst,
4141                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4142  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4143  // Create a writeback register dummy placeholder.
4144  Inst.addOperand(MCOperand::CreateImm(0));
4145  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4146  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4147}
4148
4149/// cvtThumbMultiply - Convert parsed operands to MCInst.
4150/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4151/// when they refer to multiple MIOperands inside a single one.
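/// For illustration, for the three-operand form "muls r0, r1, r0" the code
/// below selects r1 (the operand that differs from Rd) as Rn and re-adds Rd
/// as the tied source operand.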
4152void ARMAsmParser::
4153cvtThumbMultiply(MCInst &Inst,
4154           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4155  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4156  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4157  // If we have a three-operand form, make sure to set Rn to be the operand
4158  // that isn't the same as Rd.
4159  unsigned RegOp = 4;
4160  if (Operands.size() == 6 &&
4161      ((ARMOperand*)Operands[4])->getReg() ==
4162        ((ARMOperand*)Operands[3])->getReg())
4163    RegOp = 5;
4164  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4165  Inst.addOperand(Inst.getOperand(0));
4166  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4167}
4168
4169void ARMAsmParser::
4170cvtVLDwbFixed(MCInst &Inst,
4171              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4172  // Vd
4173  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4174  // Create a writeback register dummy placeholder.
4175  Inst.addOperand(MCOperand::CreateImm(0));
4176  // Vn
4177  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4178  // pred
4179  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4180}
4181
4182void ARMAsmParser::
4183cvtVLDwbRegister(MCInst &Inst,
4184                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4185  // Vd
4186  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4187  // Create a writeback register dummy placeholder.
4188  Inst.addOperand(MCOperand::CreateImm(0));
4189  // Vn
4190  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4191  // Vm
4192  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4193  // pred
4194  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4195}
4196
4197void ARMAsmParser::
4198cvtVSTwbFixed(MCInst &Inst,
4199              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4200  // Create a writeback register dummy placeholder.
4201  Inst.addOperand(MCOperand::CreateImm(0));
4202  // Vn
4203  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4204  // Vt
4205  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4206  // pred
4207  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4208}
4209
4210void ARMAsmParser::
4211cvtVSTwbRegister(MCInst &Inst,
4212                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4213  // Create a writeback register dummy placeholder.
4214  Inst.addOperand(MCOperand::CreateImm(0));
4215  // Vn
4216  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4217  // Vm
4218  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4219  // Vt
4220  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4221  // pred
4222  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4223}
4224
4225/// Parse an ARM memory expression. Return false on success; otherwise return
4226/// true after emitting a diagnostic.  The first token must be a '[' when called.
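/// For illustration, this handles forms such as "[r0]", "[r0, #16]",
/// "[r0, r1, lsl #2]", an alignment specifier as in "[r0, :128]", and a
/// trailing '!' pre-index writeback marker.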
4227bool ARMAsmParser::
4228parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4229  SMLoc S, E;
4230  assert(Parser.getTok().is(AsmToken::LBrac) &&
4231         "Token is not a Left Bracket");
4232  S = Parser.getTok().getLoc();
4233  Parser.Lex(); // Eat left bracket token.
4234
4235  const AsmToken &BaseRegTok = Parser.getTok();
4236  int BaseRegNum = tryParseRegister();
4237  if (BaseRegNum == -1)
4238    return Error(BaseRegTok.getLoc(), "register expected");
4239
4240  // The next token must either be a comma, a colon or a closing bracket.
4241  const AsmToken &Tok = Parser.getTok();
4242  if (!Tok.is(AsmToken::Colon) && !Tok.is(AsmToken::Comma) &&
4243      !Tok.is(AsmToken::RBrac))
4244    return Error(Tok.getLoc(), "malformed memory operand");
4245
4246  if (Tok.is(AsmToken::RBrac)) {
4247    E = Tok.getEndLoc();
4248    Parser.Lex(); // Eat right bracket token.
4249
4250    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4251                                             0, 0, false, S, E));
4252
4253    // If there's a pre-indexing writeback marker, '!', just add it as a token
4254    // operand. It's rather odd, but syntactically valid.
4255    if (Parser.getTok().is(AsmToken::Exclaim)) {
4256      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4257      Parser.Lex(); // Eat the '!'.
4258    }
4259
4260    return false;
4261  }
4262
4263  assert((Tok.is(AsmToken::Colon) || Tok.is(AsmToken::Comma)) &&
4264         "Lost colon or comma in memory operand?!");
4265  if (Tok.is(AsmToken::Comma)) {
4266    Parser.Lex(); // Eat the comma.
4267  }
4268
4269  // If we have a ':', it's an alignment specifier.
4270  if (Parser.getTok().is(AsmToken::Colon)) {
4271    Parser.Lex(); // Eat the ':'.
4272    E = Parser.getTok().getLoc();
4273
4274    const MCExpr *Expr;
4275    if (getParser().ParseExpression(Expr))
4276     return true;
4277
4278    // The expression has to be a constant. Memory references with relocations
4279    // don't come through here, as they use the <label> forms of the relevant
4280    // instructions.
4281    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4282    if (!CE)
4283      return Error(E, "constant expression expected");
4284
4285    unsigned Align = 0;
4286    switch (CE->getValue()) {
4287    default:
4288      return Error(E,
4289                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4290    case 16:  Align = 2; break;
4291    case 32:  Align = 4; break;
4292    case 64:  Align = 8; break;
4293    case 128: Align = 16; break;
4294    case 256: Align = 32; break;
4295    }
4296
4297    // Now we should have the closing ']'
4298    if (Parser.getTok().isNot(AsmToken::RBrac))
4299      return Error(Parser.getTok().getLoc(), "']' expected");
4300    E = Parser.getTok().getEndLoc();
4301    Parser.Lex(); // Eat right bracket token.
4302
4303    // Don't worry about range checking the value here. That's handled by
4304    // the is*() predicates.
4305    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4306                                             ARM_AM::no_shift, 0, Align,
4307                                             false, S, E));
4308
4309    // If there's a pre-indexing writeback marker, '!', just add it as a token
4310    // operand.
4311    if (Parser.getTok().is(AsmToken::Exclaim)) {
4312      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4313      Parser.Lex(); // Eat the '!'.
4314    }
4315
4316    return false;
4317  }
4318
4319  // If we have a '#', it's an immediate offset, else assume it's a register
4320  // offset. Be friendly and also accept a plain integer (without a leading
4321  // hash) for gas compatibility.
4322  if (Parser.getTok().is(AsmToken::Hash) ||
4323      Parser.getTok().is(AsmToken::Dollar) ||
4324      Parser.getTok().is(AsmToken::Integer)) {
4325    if (Parser.getTok().isNot(AsmToken::Integer))
4326      Parser.Lex(); // Eat the '#'.
4327    E = Parser.getTok().getLoc();
4328
4329    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4330    const MCExpr *Offset;
4331    if (getParser().ParseExpression(Offset))
4332     return true;
4333
4334    // The expression has to be a constant. Memory references with relocations
4335    // don't come through here, as they use the <label> forms of the relevant
4336    // instructions.
4337    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4338    if (!CE)
4339      return Error (E, "constant expression expected");
4340
4341    // If the constant was #-0, represent it as INT32_MIN.
4342    int32_t Val = CE->getValue();
4343    if (isNegative && Val == 0)
4344      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4345
4346    // Now we should have the closing ']'
4347    if (Parser.getTok().isNot(AsmToken::RBrac))
4348      return Error(Parser.getTok().getLoc(), "']' expected");
4349    E = Parser.getTok().getEndLoc();
4350    Parser.Lex(); // Eat right bracket token.
4351
4352    // Don't worry about range checking the value here. That's handled by
4353    // the is*() predicates.
4354    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4355                                             ARM_AM::no_shift, 0, 0,
4356                                             false, S, E));
4357
4358    // If there's a pre-indexing writeback marker, '!', just add it as a token
4359    // operand.
4360    if (Parser.getTok().is(AsmToken::Exclaim)) {
4361      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4362      Parser.Lex(); // Eat the '!'.
4363    }
4364
4365    return false;
4366  }
4367
4368  // The register offset is optionally preceded by a '+' or '-'
4369  bool isNegative = false;
4370  if (Parser.getTok().is(AsmToken::Minus)) {
4371    isNegative = true;
4372    Parser.Lex(); // Eat the '-'.
4373  } else if (Parser.getTok().is(AsmToken::Plus)) {
4374    // Nothing to do.
4375    Parser.Lex(); // Eat the '+'.
4376  }
4377
4378  E = Parser.getTok().getLoc();
4379  int OffsetRegNum = tryParseRegister();
4380  if (OffsetRegNum == -1)
4381    return Error(E, "register expected");
4382
4383  // If there's a shift operator, handle it.
4384  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4385  unsigned ShiftImm = 0;
4386  if (Parser.getTok().is(AsmToken::Comma)) {
4387    Parser.Lex(); // Eat the ','.
4388    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4389      return true;
4390  }
4391
4392  // Now we should have the closing ']'
4393  if (Parser.getTok().isNot(AsmToken::RBrac))
4394    return Error(Parser.getTok().getLoc(), "']' expected");
4395  E = Parser.getTok().getEndLoc();
4396  Parser.Lex(); // Eat right bracket token.
4397
4398  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4399                                           ShiftType, ShiftImm, 0, isNegative,
4400                                           S, E));
4401
4402  // If there's a pre-indexing writeback marker, '!', just add it as a token
4403  // operand.
4404  if (Parser.getTok().is(AsmToken::Exclaim)) {
4405    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4406    Parser.Lex(); // Eat the '!'.
4407  }
4408
4409  return false;
4410}
4411
4412/// parseMemRegOffsetShift - one of these two:
4413///   ( lsl | lsr | asr | ror ) , # shift_amount
4414///   rrx
4415/// Return false on success; return true on failure.
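/// For example, the "r1, lsl #2" in "[r0, r1, lsl #2]" reaches this routine as
/// "lsl #2"; "rrx" is also accepted here and takes no shift amount.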
4416bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4417                                          unsigned &Amount) {
4418  SMLoc Loc = Parser.getTok().getLoc();
4419  const AsmToken &Tok = Parser.getTok();
4420  if (Tok.isNot(AsmToken::Identifier))
4421    return true;
4422  StringRef ShiftName = Tok.getString();
4423  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4424      ShiftName == "asl" || ShiftName == "ASL")
4425    St = ARM_AM::lsl;
4426  else if (ShiftName == "lsr" || ShiftName == "LSR")
4427    St = ARM_AM::lsr;
4428  else if (ShiftName == "asr" || ShiftName == "ASR")
4429    St = ARM_AM::asr;
4430  else if (ShiftName == "ror" || ShiftName == "ROR")
4431    St = ARM_AM::ror;
4432  else if (ShiftName == "rrx" || ShiftName == "RRX")
4433    St = ARM_AM::rrx;
4434  else
4435    return Error(Loc, "illegal shift operator");
4436  Parser.Lex(); // Eat shift type token.
4437
4438  // rrx stands alone.
4439  Amount = 0;
4440  if (St != ARM_AM::rrx) {
4441    Loc = Parser.getTok().getLoc();
4442    // A '#' and a shift amount.
4443    const AsmToken &HashTok = Parser.getTok();
4444    if (HashTok.isNot(AsmToken::Hash) &&
4445        HashTok.isNot(AsmToken::Dollar))
4446      return Error(HashTok.getLoc(), "'#' expected");
4447    Parser.Lex(); // Eat hash token.
4448
4449    const MCExpr *Expr;
4450    if (getParser().ParseExpression(Expr))
4451      return true;
4452    // Range check the immediate.
4453    // lsl, ror: 0 <= imm <= 31
4454    // lsr, asr: 0 <= imm <= 32
4455    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4456    if (!CE)
4457      return Error(Loc, "shift amount must be an immediate");
4458    int64_t Imm = CE->getValue();
4459    if (Imm < 0 ||
4460        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4461        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4462      return Error(Loc, "immediate shift value out of range");
4463    // If <ShiftTy> #0, turn it into a no_shift.
4464    if (Imm == 0)
4465      St = ARM_AM::lsl;
4466    // For consistency, treat lsr #32 and asr #32 as having immediate value 0.
4467    if (Imm == 32)
4468      Imm = 0;
4469    Amount = Imm;
4470  }
4471
4472  return false;
4473}
4474
4475/// parseFPImm - A floating point immediate expression operand.
4476ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4477parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4478  // Anything that can accept a floating point constant as an operand
4479  // needs to go through here, as the regular ParseExpression is
4480  // integer only.
4481  //
4482  // This routine still creates a generic Immediate operand, containing
4483  // a bitcast of the 64-bit floating point value. The various operands
4484  // that accept floats can check whether the value is valid for them
4485  // via the standard is*() predicates.
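  //
  // For example, "vmov.f32 s0, #1.0" is parsed here, as is the raw encoded
  // form "vmov.f32 s0, #112" (a plain integer in the range [0,255]).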
4486
4487  SMLoc S = Parser.getTok().getLoc();
4488
4489  if (Parser.getTok().isNot(AsmToken::Hash) &&
4490      Parser.getTok().isNot(AsmToken::Dollar))
4491    return MatchOperand_NoMatch;
4492
4493  // Disambiguate the VMOV forms that can accept an FP immediate.
4494  // vmov.f32 <sreg>, #imm
4495  // vmov.f64 <dreg>, #imm
4496  // vmov.f32 <dreg>, #imm  @ vector f32x2
4497  // vmov.f32 <qreg>, #imm  @ vector f32x4
4498  //
4499  // There are also the NEON VMOV instructions which expect an
4500  // integer constant. Make sure we don't try to parse an FPImm
4501  // for these:
4502  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4503  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4504  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4505                           TyOp->getToken() != ".f64"))
4506    return MatchOperand_NoMatch;
4507
4508  Parser.Lex(); // Eat the '#'.
4509
4510  // Handle negation, as that still comes through as a separate token.
4511  bool isNegative = false;
4512  if (Parser.getTok().is(AsmToken::Minus)) {
4513    isNegative = true;
4514    Parser.Lex();
4515  }
4516  const AsmToken &Tok = Parser.getTok();
4517  SMLoc Loc = Tok.getLoc();
4518  if (Tok.is(AsmToken::Real)) {
4519    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4520    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4521    // If we had a '-' in front, toggle the sign bit.
4522    IntVal ^= (uint64_t)isNegative << 31;
4523    Parser.Lex(); // Eat the token.
4524    Operands.push_back(ARMOperand::CreateImm(
4525          MCConstantExpr::Create(IntVal, getContext()),
4526          S, Parser.getTok().getLoc()));
4527    return MatchOperand_Success;
4528  }
4529  // Also handle plain integers. Instructions which allow floating point
4530  // immediates also allow a raw encoded 8-bit value.
4531  if (Tok.is(AsmToken::Integer)) {
4532    int64_t Val = Tok.getIntVal();
4533    Parser.Lex(); // Eat the token.
4534    if (Val > 255 || Val < 0) {
4535      Error(Loc, "encoded floating point value out of range");
4536      return MatchOperand_ParseFail;
4537    }
4538    double RealVal = ARM_AM::getFPImmFloat(Val);
4539    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4540    Operands.push_back(ARMOperand::CreateImm(
4541        MCConstantExpr::Create(Val, getContext()), S,
4542        Parser.getTok().getLoc()));
4543    return MatchOperand_Success;
4544  }
4545
4546  Error(Loc, "invalid floating point immediate");
4547  return MatchOperand_ParseFail;
4548}
4549
4550/// Parse an ARM instruction operand.  For now this parses the operand regardless
4551/// of the mnemonic.
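/// For example, "r0" and "r0!" take the register paths below, "[r0, #4]" is
/// dispatched to parseMemory, "{r0, r1}" to parseRegisterList, and "#42"
/// becomes an immediate operand.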
4552bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4553                                StringRef Mnemonic) {
4554  SMLoc S, E;
4555
4556  // Check if the current operand has a custom associated parser, if so, try to
4557  // custom parse the operand, or fallback to the general approach.
4558  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4559  if (ResTy == MatchOperand_Success)
4560    return false;
4561  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4562  // there was a match, but an error occurred, in which case, just return that
4563  // the operand parsing failed.
4564  if (ResTy == MatchOperand_ParseFail)
4565    return true;
4566
4567  switch (getLexer().getKind()) {
4568  default:
4569    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4570    return true;
4571  case AsmToken::Identifier: {
4572    if (!tryParseRegisterWithWriteBack(Operands))
4573      return false;
4574    int Res = tryParseShiftRegister(Operands);
4575    if (Res == 0) // success
4576      return false;
4577    else if (Res == -1) // irrecoverable error
4578      return true;
4579    // If this is VMRS, check for the apsr_nzcv operand.
4580    if (Mnemonic == "vmrs" &&
4581        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4582      S = Parser.getTok().getLoc();
4583      Parser.Lex();
4584      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4585      return false;
4586    }
4587
4588    // Fall through for the Identifier case that is not a register or a
4589    // special name.
4590  }
4591  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4592  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4593  case AsmToken::String:  // quoted label names.
4594  case AsmToken::Dot: {   // . as a branch target
4595    // This was not a register so parse other operands that start with an
4596    // identifier (like labels) as expressions and create them as immediates.
4597    const MCExpr *IdVal;
4598    S = Parser.getTok().getLoc();
4599    if (getParser().ParseExpression(IdVal))
4600      return true;
4601    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4602    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4603    return false;
4604  }
4605  case AsmToken::LBrac:
4606    return parseMemory(Operands);
4607  case AsmToken::LCurly:
4608    return parseRegisterList(Operands);
4609  case AsmToken::Dollar:
4610  case AsmToken::Hash: {
4611    // #42 -> immediate.
4612    S = Parser.getTok().getLoc();
4613    Parser.Lex();
4614
4615    if (Parser.getTok().isNot(AsmToken::Colon)) {
4616      bool isNegative = Parser.getTok().is(AsmToken::Minus);
4617      const MCExpr *ImmVal;
4618      if (getParser().ParseExpression(ImmVal))
4619        return true;
4620      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4621      if (CE) {
4622        int32_t Val = CE->getValue();
4623        if (isNegative && Val == 0)
4624          ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4625      }
4626      E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4627      Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4628      return false;
4629    }
4630    // w/ a ':' after the '#', it's just like a plain ':'.
4631    // FALLTHROUGH
4632  }
4633  case AsmToken::Colon: {
4634    // ":lower16:" and ":upper16:" expression prefixes
4635    // FIXME: Check it's an expression prefix,
4636    // e.g. (FOO - :lower16:BAR) isn't legal.
4637    ARMMCExpr::VariantKind RefKind;
4638    if (parsePrefix(RefKind))
4639      return true;
4640
4641    const MCExpr *SubExprVal;
4642    if (getParser().ParseExpression(SubExprVal))
4643      return true;
4644
4645    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4646                                              getContext());
4647    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4648    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4649    return false;
4650  }
4651  }
4652}
4653
4654// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4655//  :lower16: and :upper16:.
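// For example, in "movw r0, #:lower16:foo" the ":lower16:" prefix selects the
// low half of the address of foo (VK_ARM_LO16).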
4656bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4657  RefKind = ARMMCExpr::VK_ARM_None;
4658
4659  // :lower16: and :upper16: modifiers
4660  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4661  Parser.Lex(); // Eat ':'
4662
4663  if (getLexer().isNot(AsmToken::Identifier)) {
4664    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4665    return true;
4666  }
4667
4668  StringRef IDVal = Parser.getTok().getIdentifier();
4669  if (IDVal == "lower16") {
4670    RefKind = ARMMCExpr::VK_ARM_LO16;
4671  } else if (IDVal == "upper16") {
4672    RefKind = ARMMCExpr::VK_ARM_HI16;
4673  } else {
4674    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4675    return true;
4676  }
4677  Parser.Lex();
4678
4679  if (getLexer().isNot(AsmToken::Colon)) {
4680    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4681    return true;
4682  }
4683  Parser.Lex(); // Eat the last ':'
4684  return false;
4685}
4686
4687/// \brief Given a mnemonic, split out possible predication code and carry
4688/// setting letters to form a canonical mnemonic and flags.
4689//
4690// FIXME: Would be nice to autogen this.
4691// FIXME: This is a bit of a maze of special cases.
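// For example, given the rules below, "addseq" splits into the canonical
// mnemonic "add" with predication ARMCC::EQ and CarrySetting set, while
// "cpsie" splits into "cps" with ProcessorIMod = ARM_PROC::IE.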
4692StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4693                                      unsigned &PredicationCode,
4694                                      bool &CarrySetting,
4695                                      unsigned &ProcessorIMod,
4696                                      StringRef &ITMask) {
4697  PredicationCode = ARMCC::AL;
4698  CarrySetting = false;
4699  ProcessorIMod = 0;
4700
4701  // Ignore some mnemonics we know aren't predicated forms.
4702  //
4703  // FIXME: Would be nice to autogen this.
4704  if ((Mnemonic == "movs" && isThumb()) ||
4705      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4706      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4707      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4708      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4709      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4710      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4711      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4712      Mnemonic == "fmuls")
4713    return Mnemonic;
4714
4715  // First, split out any predication code. Ignore mnemonics we know aren't
4716  // predicated but do have a carry-set and so weren't caught above.
4717  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4718      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4719      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4720      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4721    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4722      .Case("eq", ARMCC::EQ)
4723      .Case("ne", ARMCC::NE)
4724      .Case("hs", ARMCC::HS)
4725      .Case("cs", ARMCC::HS)
4726      .Case("lo", ARMCC::LO)
4727      .Case("cc", ARMCC::LO)
4728      .Case("mi", ARMCC::MI)
4729      .Case("pl", ARMCC::PL)
4730      .Case("vs", ARMCC::VS)
4731      .Case("vc", ARMCC::VC)
4732      .Case("hi", ARMCC::HI)
4733      .Case("ls", ARMCC::LS)
4734      .Case("ge", ARMCC::GE)
4735      .Case("lt", ARMCC::LT)
4736      .Case("gt", ARMCC::GT)
4737      .Case("le", ARMCC::LE)
4738      .Case("al", ARMCC::AL)
4739      .Default(~0U);
4740    if (CC != ~0U) {
4741      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4742      PredicationCode = CC;
4743    }
4744  }
4745
4746  // Next, determine if we have a carry setting bit. We explicitly ignore all
4747  // the instructions we know end in 's'.
4748  if (Mnemonic.endswith("s") &&
4749      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4750        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4751        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4752        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4753        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4754        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4755        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4756        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4757        Mnemonic == "vfms" || Mnemonic == "vfnms" ||
4758        (Mnemonic == "movs" && isThumb()))) {
4759    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4760    CarrySetting = true;
4761  }
4762
4763  // The "cps" instruction can have an interrupt mode operand which is glued into
4764  // the mnemonic. Check if this is the case, split it out, and parse the imod operand.
4765  if (Mnemonic.startswith("cps")) {
4766    // Split out any imod code.
4767    unsigned IMod =
4768      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4769      .Case("ie", ARM_PROC::IE)
4770      .Case("id", ARM_PROC::ID)
4771      .Default(~0U);
4772    if (IMod != ~0U) {
4773      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4774      ProcessorIMod = IMod;
4775    }
4776  }
4777
4778  // The "it" instruction has the condition mask on the end of the mnemonic.
4779  if (Mnemonic.startswith("it")) {
4780    ITMask = Mnemonic.slice(2, Mnemonic.size());
4781    Mnemonic = Mnemonic.slice(0, 2);
4782  }
4783
4784  return Mnemonic;
4785}
4786
4787/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4788/// inclusion of carry set or predication code operands.
4789//
4790// FIXME: It would be nice to autogen this.
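// For example, "add" can take both an optional carry-setting 's' suffix and a
// predication code ("addseq"), while something like "cbz" accepts neither.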
4791void ARMAsmParser::
4792getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4793                      bool &CanAcceptPredicationCode) {
4794  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4795      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4796      Mnemonic == "add" || Mnemonic == "adc" ||
4797      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4798      Mnemonic == "orr" || Mnemonic == "mvn" ||
4799      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4800      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4801      Mnemonic == "vfm" || Mnemonic == "vfnm" ||
4802      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4803                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4804                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4805    CanAcceptCarrySet = true;
4806  } else
4807    CanAcceptCarrySet = false;
4808
4809  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4810      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4811      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4812      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4813      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4814      (Mnemonic == "clrex" && !isThumb()) ||
4815      (Mnemonic == "nop" && isThumbOne()) ||
4816      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4817        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4818        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4819      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4820       !isThumb()) ||
4821      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4822    CanAcceptPredicationCode = false;
4823  } else
4824    CanAcceptPredicationCode = true;
4825
4826  if (isThumb()) {
4827    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4828        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4829      CanAcceptPredicationCode = false;
4830  }
4831}
4832
4833bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4834                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4835  // FIXME: This is all horribly hacky. We really need a better way to deal
4836  // with optional operands like this in the matcher table.
4837
4838  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4839  // another does not. Specifically, the MOVW instruction does not. So we
4840  // special case it here and remove the defaulted (non-setting) cc_out
4841  // operand if that's the instruction we're trying to match.
4842  //
4843  // We do this as post-processing of the explicit operands rather than just
4844  // conditionally adding the cc_out in the first place because we need
4845  // to check the type of the parsed immediate operand.
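  // For instance, in ARM mode "mov r0, #0x1234" can only encode as MOVW
  // (0x1234 is not a valid modified immediate), so the cc_out operand must go.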
4846  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4847      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4848      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4849      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4850    return true;
4851
4852  // Register-register 'add' for thumb does not have a cc_out operand
4853  // when there are only two register operands.
4854  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4855      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4856      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4857      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4858    return true;
4859  // Register-register 'add' for thumb does not have a cc_out operand
4860  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4861  // have to check the immediate range here since Thumb2 has a variant
4862  // that can handle a different range and has a cc_out operand.
4863  if (((isThumb() && Mnemonic == "add") ||
4864       (isThumbTwo() && Mnemonic == "sub")) &&
4865      Operands.size() == 6 &&
4866      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4867      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4868      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4869      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4870      ((Mnemonic == "add" && static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4871       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4872    return true;
4873  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4874  // imm0_4095 variant. That's the least-preferred variant when
4875  // selecting via the generic "add" mnemonic, so to know that we
4876  // should remove the cc_out operand, we have to explicitly check that
4877  // it's not one of the other variants. Ugh.
4878  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4879      Operands.size() == 6 &&
4880      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4881      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4882      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4883    // Nest conditions rather than one big 'if' statement for readability.
4884    //
4885    // If either register is a high reg, it's either one of the SP
4886    // variants (handled above) or a 32-bit encoding, so we just
4887    // check against T3. If the second register is the PC, this is an
4888    // alternate form of ADR, which uses encoding T4, so check for that too.
4889    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4890         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4891        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4892        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4893      return false;
4894    // If both registers are low, we're in an IT block, and the immediate is
4895    // in range, we should use encoding T1 instead, which has a cc_out.
4896    if (inITBlock() &&
4897        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4898        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4899        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4900      return false;
4901
4902    // Otherwise, we use encoding T4, which does not have a cc_out
4903    // operand.
4904    return true;
4905  }
4906
4907  // The thumb2 multiply instruction doesn't have a CCOut register, so
4908  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4909  // use the 16-bit encoding or not.
4910  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4911      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4912      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4913      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4914      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4915      // If the registers aren't low regs, the destination reg isn't the
4916      // same as one of the source regs, or the cc_out operand is zero
4917      // outside of an IT block, we have to use the 32-bit encoding, so
4918      // remove the cc_out operand.
4919      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4920       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4921       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4922       !inITBlock() ||
4923       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4924        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4925        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4926        static_cast<ARMOperand*>(Operands[4])->getReg())))
4927    return true;
4928
4929  // Also check the 'mul' syntax variant that doesn't specify an explicit
4930  // destination register.
4931  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4932      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4933      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4934      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4935      // If the registers aren't low regs or the cc_out operand is zero
4936      // outside of an IT block, we have to use the 32-bit encoding, so
4937      // remove the cc_out operand.
4938      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4939       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4940       !inITBlock()))
4941    return true;
4942
4945  // Register-register 'add/sub' for thumb does not have a cc_out operand
4946  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4947  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4948  // right, this will result in better diagnostics (which operand is off)
4949  // anyway.
4950  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4951      (Operands.size() == 5 || Operands.size() == 6) &&
4952      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4953      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4954      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4955      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4956       (Operands.size() == 6 &&
4957        static_cast<ARMOperand*>(Operands[5])->isImm())))
4958    return true;
4959
4960  return false;
4961}
4962
4963static bool isDataTypeToken(StringRef Tok) {
4964  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4965    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4966    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4967    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4968    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4969    Tok == ".f" || Tok == ".d";
4970}
4971
4972// FIXME: This bit should probably be handled via an explicit match class
4973// in the .td files that matches the suffix instead of having it be
4974// a literal string token the way it is now.
4975static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4976  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4977}
4978
4979static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4980/// Parse an ARM instruction mnemonic followed by its operands.
4981bool ARMAsmParser::ParseInstruction(ParseInstructionInfo &Info, StringRef Name,
4982                                    SMLoc NameLoc,
4983                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4984  // Apply mnemonic aliases before doing anything else, as the destination
4985  // mnemonic may include suffixes and we want to handle them normally.
4986  // The generic tblgen'erated code does this later, at the start of
4987  // MatchInstructionImpl(), but that's too late for aliases that include
4988  // any sort of suffix.
4989  unsigned AvailableFeatures = getAvailableFeatures();
4990  applyMnemonicAliases(Name, AvailableFeatures);
4991
4992  // First check for the ARM-specific .req directive.
4993  if (Parser.getTok().is(AsmToken::Identifier) &&
4994      Parser.getTok().getIdentifier() == ".req") {
4995    parseDirectiveReq(Name, NameLoc);
4996    // We always return 'error' for this, as we're done with this
4997    // statement and don't need to match the instruction.
4998    return true;
4999  }
5000
5001  // Create the leading tokens for the mnemonic, split by '.' characters.
5002  size_t Start = 0, Next = Name.find('.');
5003  StringRef Mnemonic = Name.slice(Start, Next);
5004
5005  // Split out the predication code and carry setting flag from the mnemonic.
5006  unsigned PredicationCode;
5007  unsigned ProcessorIMod;
5008  bool CarrySetting;
5009  StringRef ITMask;
5010  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
5011                           ProcessorIMod, ITMask);
5012
5013  // In Thumb1, only the branch (B) instruction can be predicated.
5014  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
5015    Parser.EatToEndOfStatement();
5016    return Error(NameLoc, "conditional execution not supported in Thumb1");
5017  }
5018
5019  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
5020
5021  // Handle the IT instruction ITMask. Convert it to a bitmask. This
5022  // is the mask as it will be for the IT encoding if the conditional
5023  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
5024  // where the conditional bit0 is zero, the instruction post-processing
5025  // will adjust the mask accordingly.
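  // For example, as computed by the loop below: "it" yields mask 0b1000,
  // "itt" yields 0b1100, and "ite" yields 0b0100.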
5026  if (Mnemonic == "it") {
5027    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
5028    if (ITMask.size() > 3) {
5029      Parser.EatToEndOfStatement();
5030      return Error(Loc, "too many conditions on IT instruction");
5031    }
5032    unsigned Mask = 8;
5033    for (unsigned i = ITMask.size(); i != 0; --i) {
5034      char pos = ITMask[i - 1];
5035      if (pos != 't' && pos != 'e') {
5036        Parser.EatToEndOfStatement();
5037        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
5038      }
5039      Mask >>= 1;
5040      if (ITMask[i - 1] == 't')
5041        Mask |= 8;
5042    }
5043    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
5044  }
5045
5046  // FIXME: This is all a pretty gross hack. We should automatically handle
5047  // optional operands like this via tblgen.
5048
5049  // Next, add the CCOut and ConditionCode operands, if needed.
5050  //
5051  // For mnemonics which can ever incorporate a carry setting bit or predication
5052  // code, our matching model involves us always generating CCOut and
5053  // ConditionCode operands to match the mnemonic "as written" and then we let
5054  // the matcher deal with finding the right instruction or generating an
5055  // appropriate error.
5056  bool CanAcceptCarrySet, CanAcceptPredicationCode;
5057  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
5058
5059  // If we had a carry-set on an instruction that can't do that, issue an
5060  // error.
5061  if (!CanAcceptCarrySet && CarrySetting) {
5062    Parser.EatToEndOfStatement();
5063    return Error(NameLoc, "instruction '" + Mnemonic +
5064                 "' can not set flags, but 's' suffix specified");
5065  }
5066  // If we had a predication code on an instruction that can't do that, issue an
5067  // error.
5068  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
5069    Parser.EatToEndOfStatement();
5070    return Error(NameLoc, "instruction '" + Mnemonic +
5071                 "' is not predicable, but condition code specified");
5072  }
5073
5074  // Add the carry setting operand, if necessary.
5075  if (CanAcceptCarrySet) {
5076    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
5077    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
5078                                               Loc));
5079  }
5080
5081  // Add the predication code operand, if necessary.
5082  if (CanAcceptPredicationCode) {
5083    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
5084                                      CarrySetting);
5085    Operands.push_back(ARMOperand::CreateCondCode(
5086                         ARMCC::CondCodes(PredicationCode), Loc));
5087  }
5088
5089  // Add the processor imod operand, if necessary.
5090  if (ProcessorIMod) {
5091    Operands.push_back(ARMOperand::CreateImm(
5092          MCConstantExpr::Create(ProcessorIMod, getContext()),
5093                                 NameLoc, NameLoc));
5094  }
5095
5096  // Add the remaining tokens in the mnemonic.
5097  while (Next != StringRef::npos) {
5098    Start = Next;
5099    Next = Name.find('.', Start + 1);
5100    StringRef ExtraToken = Name.slice(Start, Next);
5101
5102    // Some NEON instructions have an optional datatype suffix that is
5103    // completely ignored. Check for that.
5104    if (isDataTypeToken(ExtraToken) &&
5105        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5106      continue;
5107
5108    if (ExtraToken != ".n") {
5109      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5110      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5111    }
5112  }
5113
5114  // Read the remaining operands.
5115  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5116    // Read the first operand.
5117    if (parseOperand(Operands, Mnemonic)) {
5118      Parser.EatToEndOfStatement();
5119      return true;
5120    }
5121
5122    while (getLexer().is(AsmToken::Comma)) {
5123      Parser.Lex();  // Eat the comma.
5124
5125      // Parse and remember the operand.
5126      if (parseOperand(Operands, Mnemonic)) {
5127        Parser.EatToEndOfStatement();
5128        return true;
5129      }
5130    }
5131  }
5132
5133  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5134    SMLoc Loc = getLexer().getLoc();
5135    Parser.EatToEndOfStatement();
5136    return Error(Loc, "unexpected token in argument list");
5137  }
5138
5139  Parser.Lex(); // Consume the EndOfStatement
5140
5141  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5142  // do and don't have a cc_out optional-def operand. With some spot-checks
5143  // of the operand list, we can figure out which variant we're trying to
5144  // parse and adjust accordingly before actually matching. We shouldn't ever
5145  // try to remove a cc_out operand that was explicitly set on the
5146  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5147  // table-driven matcher doesn't fit well with the ARM instruction set.
5148  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5149    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5150    Operands.erase(Operands.begin() + 1);
5151    delete Op;
5152  }
5153
5154  // ARM mode 'blx' needs special handling, as the register operand version
5155  // is predicable, but the label operand version is not. So, we can't rely
5156  // on the Mnemonic based checking to correctly figure out when to put
5157  // a k_CondCode operand in the list. If we're trying to match the label
5158  // version, remove the k_CondCode operand here.
5159  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5160      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5161    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5162    Operands.erase(Operands.begin() + 1);
5163    delete Op;
5164  }
5165
5166  // Adjust operands of ldrexd/strexd to MCK_GPRPair.
5167  // ldrexd/strexd require even/odd GPR pair. To enforce this constraint,
5168  // a single GPRPair reg operand is used in the .td file to replace the two
5169  // GPRs. However, when parsing from asm, the two GPRs cannot be automatically
5170  // expressed as a GPRPair, so we have to manually merge them.
5171  // FIXME: We would really like to be able to tablegen'erate this.
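  // For example, "ldrexd r0, r1, [r2]" has r0/r1 folded into the single
  // GPRPair register (R0_R1, assuming the usual even/odd pair naming in the
  // ARM backend) before matching.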
5172  if (!isThumb() && Operands.size() > 4 &&
5173      (Mnemonic == "ldrexd" || Mnemonic == "strexd")) {
5174    bool isLoad = (Mnemonic == "ldrexd");
5175    unsigned Idx = isLoad ? 2 : 3;
5176    ARMOperand* Op1 = static_cast<ARMOperand*>(Operands[Idx]);
5177    ARMOperand* Op2 = static_cast<ARMOperand*>(Operands[Idx+1]);
5178
5179    const MCRegisterClass& MRC = MRI->getRegClass(ARM::GPRRegClassID);
5180    // Adjust only if Op1 and Op2 are GPRs.
5181    if (Op1->isReg() && Op2->isReg() && MRC.contains(Op1->getReg()) &&
5182        MRC.contains(Op2->getReg())) {
5183      unsigned Reg1 = Op1->getReg();
5184      unsigned Reg2 = Op2->getReg();
5185      unsigned Rt = MRI->getEncodingValue(Reg1);
5186      unsigned Rt2 = MRI->getEncodingValue(Reg2);
5187
5188      // Rt2 must be Rt + 1 and Rt must be even.
5189      if (Rt + 1 != Rt2 || (Rt & 1)) {
5190        Error(Op2->getStartLoc(), isLoad ?
5191            "destination operands must be sequential" :
5192            "source operands must be sequential");
5193        return true;
5194      }
5195      unsigned NewReg = MRI->getMatchingSuperReg(Reg1, ARM::gsub_0,
5196          &(MRI->getRegClass(ARM::GPRPairRegClassID)));
5197      Operands.erase(Operands.begin() + Idx, Operands.begin() + Idx + 2);
5198      Operands.insert(Operands.begin() + Idx, ARMOperand::CreateReg(
5199            NewReg, Op1->getStartLoc(), Op2->getEndLoc()));
5200      delete Op1;
5201      delete Op2;
5202    }
5203  }
5204
5205  return false;
5206}
5207
5208// Validate context-sensitive operand constraints.
5209
5210// Return 'true' if the register list contains any register that is neither a
5211// low GPR nor the permitted HiReg, 'false' otherwise. If Reg appears in the
5212// register list, set 'containsReg' to true.
5213static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5214                                 unsigned HiReg, bool &containsReg) {
5215  containsReg = false;
5216  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5217    unsigned OpReg = Inst.getOperand(i).getReg();
5218    if (OpReg == Reg)
5219      containsReg = true;
5220    // Anything other than a low register isn't legal here.
5221    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5222      return true;
5223  }
5224  return false;
5225}
5226
5227// Check if the specified register is in the register list of the inst,
5228// starting at the indicated operand number.
5229static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5230  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5231    unsigned OpReg = Inst.getOperand(i).getReg();
5232    if (OpReg == Reg)
5233      return true;
5234  }
5235  return false;
5236}
5237
5238// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5239// the ARMInsts array) instead. Getting that here requires awkward
5240// API changes, though. Better way?
5241namespace llvm {
5242extern const MCInstrDesc ARMInsts[];
5243}
5244static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5245  return ARMInsts[Opcode];
5246}
5247
5248// FIXME: We would really like to be able to tablegen'erate this.
5249bool ARMAsmParser::
5250validateInstruction(MCInst &Inst,
5251                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5252  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5253  SMLoc Loc = Operands[0]->getStartLoc();
5254  // Check the IT block state first.
5255  // NOTE: BKPT instruction has the interesting property of being
5256  // allowed in IT blocks, but not being predicable.  It just always
5257  // executes.
5258  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5259      Inst.getOpcode() != ARM::BKPT) {
5260    unsigned bit = 1;
5261    if (ITState.FirstCond)
5262      ITState.FirstCond = false;
5263    else
5264      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5265    // The instruction must be predicable.
5266    if (!MCID.isPredicable())
5267      return Error(Loc, "instructions in IT block must be predicable");
5268    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5269    unsigned ITCond = bit ? ITState.Cond :
5270      ARMCC::getOppositeCondition(ITState.Cond);
5271    if (Cond != ITCond) {
5272      // Find the condition code Operand to get its SMLoc information.
5273      SMLoc CondLoc;
5274      for (unsigned i = 1; i < Operands.size(); ++i)
5275        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5276          CondLoc = Operands[i]->getStartLoc();
5277      return Error(CondLoc, "incorrect condition in IT block; got '" +
5278                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5279                   "', but expected '" +
5280                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5281    }
5282  // Check for non-'al' condition codes outside of the IT block.
5283  } else if (isThumbTwo() && MCID.isPredicable() &&
5284             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5285             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5286             Inst.getOpcode() != ARM::t2B)
5287    return Error(Loc, "predicated instructions must be in IT block");
5288
5289  switch (Inst.getOpcode()) {
5290  case ARM::LDRD:
5291  case ARM::LDRD_PRE:
5292  case ARM::LDRD_POST: {
5293    // Rt2 must be Rt + 1.
5294    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5295    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5296    if (Rt2 != Rt + 1)
5297      return Error(Operands[3]->getStartLoc(),
5298                   "destination operands must be sequential");
5299    return false;
5300  }
5301  case ARM::STRD: {
5302    // Rt2 must be Rt + 1.
5303    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(0).getReg());
5304    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5305    if (Rt2 != Rt + 1)
5306      return Error(Operands[3]->getStartLoc(),
5307                   "source operands must be sequential");
5308    return false;
5309  }
5310  case ARM::STRD_PRE:
5311  case ARM::STRD_POST: {
5312    // Rt2 must be Rt + 1.
5313    unsigned Rt = MRI->getEncodingValue(Inst.getOperand(1).getReg());
5314    unsigned Rt2 = MRI->getEncodingValue(Inst.getOperand(2).getReg());
5315    if (Rt2 != Rt + 1)
5316      return Error(Operands[3]->getStartLoc(),
5317                   "source operands must be sequential");
5318    return false;
5319  }
5320  case ARM::SBFX:
5321  case ARM::UBFX: {
5322    // width must be in range [1, 32-lsb]
5323    unsigned lsb = Inst.getOperand(2).getImm();
5324    unsigned widthm1 = Inst.getOperand(3).getImm();
5325    if (widthm1 >= 32 - lsb)
5326      return Error(Operands[5]->getStartLoc(),
5327                   "bitfield width must be in range [1,32-lsb]");
5328    return false;
5329  }
5330  case ARM::tLDMIA: {
5331    // If we're parsing Thumb2, the .w variant is available and handles
5332    // most cases that are normally illegal for a Thumb1 LDM
5333    // instruction. We'll make the transformation in processInstruction()
5334    // if necessary.
5335    //
5336    // Thumb LDM instructions are writeback iff the base register is not
5337    // in the register list.
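    // For example, "ldmia r0!, {r1, r2}" is the writeback form, while
    // "ldmia r0, {r0, r1}" must not use '!' since r0 is in the list.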
5338    unsigned Rn = Inst.getOperand(0).getReg();
5339    bool hasWritebackToken =
5340      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5341       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5342    bool listContainsBase;
5343    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5344      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5345                   "registers must be in range r0-r7");
5346    // If we should have writeback, then there should be a '!' token.
5347    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5348      return Error(Operands[2]->getStartLoc(),
5349                   "writeback operator '!' expected");
5350    // If we should not have writeback, there must not be a '!'. This is
5351    // true even for the 32-bit wide encodings.
5352    if (listContainsBase && hasWritebackToken)
5353      return Error(Operands[3]->getStartLoc(),
5354                   "writeback operator '!' not allowed when base register "
5355                   "in register list");
5356
5357    break;
5358  }
5359  case ARM::t2LDMIA_UPD: {
5360    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5361      return Error(Operands[4]->getStartLoc(),
5362                   "writeback operator '!' not allowed when base register "
5363                   "in register list");
5364    break;
5365  }
5366  case ARM::tMUL: {
5367    // The second source operand must be the same register as the destination
5368    // operand.
5369    //
5370    // In this case, we must directly check the parsed operands because the
5371    // cvtThumbMultiply() function is written in such a way that it guarantees
5372    // this first statement is always true for the new Inst.  Essentially, the
5373    // destination is unconditionally copied into the second source operand
5374    // without checking to see if it matches what we actually parsed.
5375    if (Operands.size() == 6 &&
5376        (((ARMOperand*)Operands[3])->getReg() !=
5377         ((ARMOperand*)Operands[5])->getReg()) &&
5378        (((ARMOperand*)Operands[3])->getReg() !=
5379         ((ARMOperand*)Operands[4])->getReg())) {
5380      return Error(Operands[3]->getStartLoc(),
5381                   "destination register must match source register");
5382    }
5383    break;
5384  }
5385  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
5386  // so only issue a diagnostic for thumb1. The instructions will be
5387  // switched to the t2 encodings in processInstruction() if necessary.
5388  case ARM::tPOP: {
5389    bool listContainsBase;
5390    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5391        !isThumbTwo())
5392      return Error(Operands[2]->getStartLoc(),
5393                   "registers must be in range r0-r7 or pc");
5394    break;
5395  }
5396  case ARM::tPUSH: {
5397    bool listContainsBase;
5398    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5399        !isThumbTwo())
5400      return Error(Operands[2]->getStartLoc(),
5401                   "registers must be in range r0-r7 or lr");
5402    break;
5403  }
5404  case ARM::tSTMIA_UPD: {
5405    bool listContainsBase;
5406    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5407      return Error(Operands[4]->getStartLoc(),
5408                   "registers must be in range r0-r7");
5409    break;
5410  }
5411  case ARM::tADDrSP: {
5412    // If the non-SP source operand and the destination operand are not the
5413    // same, we need thumb2 (for the wide encoding), or we have an error.
5414    if (!isThumbTwo() &&
5415        Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
5416      return Error(Operands[4]->getStartLoc(),
5417                   "source register must be the same as destination");
5418    }
5419    break;
5420  }
5421  }
5422
5423  return false;
5424}
5425
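// Map a pseudo "...Asm..." VST opcode (used only for assembly matching) to the
// real instruction opcode, and report the register list spacing (1 =
// consecutive D registers, 2 = every other D register, i.e. Q-register forms).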
5426static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5427  switch(Opc) {
5428  default: llvm_unreachable("unexpected opcode!");
5429  // VST1LN
5430  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5431  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5432  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5433  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5434  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5435  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5436  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5437  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5438  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5439
5440  // VST2LN
5441  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5442  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5443  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5444  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5445  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5446
5447  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5448  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5449  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5450  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5451  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5452
5453  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5454  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5455  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5456  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5457  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5458
5459  // VST3LN
5460  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5461  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5462  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5463  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5464  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5465  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5466  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5467  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5468  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5469  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5470  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5471  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5472  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5473  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5474  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5475
5476  // VST3
5477  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5478  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5479  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5480  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5481  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5482  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5483  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5484  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5485  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5486  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5487  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5488  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5489  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5490  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5491  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5492  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5493  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5494  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5495
5496  // VST4LN
5497  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5498  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5499  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5500  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5501  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5502  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5503  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5504  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5505  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5506  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5507  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5508  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5509  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5510  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5511  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5512
5513  // VST4
5514  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5515  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5516  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5517  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5518  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5519  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5520  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5521  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5522  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5523  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5524  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5525  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5526  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5527  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5528  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5529  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5530  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5531  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5532  }
5533}
5534
5535static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5536  switch(Opc) {
5537  default: llvm_unreachable("unexpected opcode!");
5538  // VLD1LN
5539  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5540  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5541  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5542  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5543  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5544  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5545  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5546  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5547  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5548
5549  // VLD2LN
5550  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5551  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5552  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5553  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5554  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5555  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5556  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5557  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5558  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5559  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5560  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5561  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5562  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5563  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5564  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5565
5566  // VLD3DUP
5567  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5568  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5569  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5570  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5571  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5572  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5573  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5574  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5575  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5576  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5577  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5578  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5579  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5580  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5581  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5582  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5583  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5584  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5585
5586  // VLD3LN
5587  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5588  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5589  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5590  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5591  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5592  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5593  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5594  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5595  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5596  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5597  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5598  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5599  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5600  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5601  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5602
5603  // VLD3
5604  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5605  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5606  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5607  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5608  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5609  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5610  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5611  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5612  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5613  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5614  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5615  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5616  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5617  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5618  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5619  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5620  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5621  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5622
5623  // VLD4LN
5624  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5625  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5626  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5627  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5628  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5629  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5630  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5631  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5632  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5633  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5634  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5635  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5636  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5637  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5638  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5639
5640  // VLD4DUP
5641  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5642  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5643  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5644  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5645  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5646  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5647  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5648  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5649  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5650  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5651  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5652  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5653  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5654  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5655  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5656  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5657  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5658  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5659
5660  // VLD4
5661  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5662  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5663  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5664  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5665  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5666  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5667  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5668  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5669  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5670  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5671  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5672  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5673  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5674  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5675  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5676  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5677  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5678  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5679  }
5680}
5681
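// An informal example of how the two helpers above are used: for the pseudo
// "vld4.16 {d0[1], d2[1], d4[1], d6[1]}, [r1]" the matcher selects
// ARM::VLD4LNqAsm_16; getRealVLDOpcode() maps it to ARM::VLD4LNq16 and sets
// Spacing = 2, so processInstruction() below rebuilds the register list
// d0, d2, d4, d6 by repeatedly adding Spacing to the first register.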
5682bool ARMAsmParser::
5683processInstruction(MCInst &Inst,
5684                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5685  switch (Inst.getOpcode()) {
5686  // Alias for alternate form of 'ADR Rd, #imm' instruction.
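  // e.g. "add r0, pc, #8" is accepted and re-emitted as "adr r0, #8"; the
  // check below bails out unless the source register is PC and the optional
  // flag-setting (cc_out) operand is absent.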
5687  case ARM::ADDri: {
5688    if (Inst.getOperand(1).getReg() != ARM::PC ||
5689        Inst.getOperand(5).getReg() != 0)
5690      return false;
5691    MCInst TmpInst;
5692    TmpInst.setOpcode(ARM::ADR);
5693    TmpInst.addOperand(Inst.getOperand(0));
5694    TmpInst.addOperand(Inst.getOperand(2));
5695    TmpInst.addOperand(Inst.getOperand(3));
5696    TmpInst.addOperand(Inst.getOperand(4));
5697    Inst = TmpInst;
5698    return true;
5699  }
5700  // Aliases for alternate PC+imm syntax of LDR instructions.
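  // e.g. "ldr r0, [pc, #16]" in Thumb2 is emitted as a literal load; the code
  // below picks the narrow 16-bit encoding when the offset is positive and
  // fits in 8 bits, and the 32-bit encoding otherwise.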
5701  case ARM::t2LDRpcrel:
5702    // Select the narrow version if the immediate will fit.
5703    if (Inst.getOperand(1).getImm() > 0 &&
5704        Inst.getOperand(1).getImm() <= 0xff)
5705      Inst.setOpcode(ARM::tLDRpci);
5706    else
5707      Inst.setOpcode(ARM::t2LDRpci);
5708    return true;
5709  case ARM::t2LDRBpcrel:
5710    Inst.setOpcode(ARM::t2LDRBpci);
5711    return true;
5712  case ARM::t2LDRHpcrel:
5713    Inst.setOpcode(ARM::t2LDRHpci);
5714    return true;
5715  case ARM::t2LDRSBpcrel:
5716    Inst.setOpcode(ARM::t2LDRSBpci);
5717    return true;
5718  case ARM::t2LDRSHpcrel:
5719    Inst.setOpcode(ARM::t2LDRSHpci);
5720    return true;
5721  // Handle NEON VST complex aliases.
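  // These "..._Asm" opcodes are assembler-only pseudo-instructions; their
  // operand order and register lists do not line up with the real NEON store
  // instructions, so they are expanded by hand below.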
5722  case ARM::VST1LNdWB_register_Asm_8:
5723  case ARM::VST1LNdWB_register_Asm_16:
5724  case ARM::VST1LNdWB_register_Asm_32: {
5725    MCInst TmpInst;
5726    // Shuffle the operands around so the lane index operand is in the
5727    // right place.
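    // For example, with "vst1.8 {d4[1]}, [r2], r3" the pseudo carries
    // (Vd, lane, Rn, align, Rm, pred, pred-reg), while the real
    // VST1LNd8_UPD expects (Rn_wb, Rn, align, Rm, Vd, lane, pred, pred-reg).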
5728    unsigned Spacing;
5729    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5730    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5731    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5732    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5733    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5734    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5735    TmpInst.addOperand(Inst.getOperand(1)); // lane
5736    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5737    TmpInst.addOperand(Inst.getOperand(6));
5738    Inst = TmpInst;
5739    return true;
5740  }
5741
5742  case ARM::VST2LNdWB_register_Asm_8:
5743  case ARM::VST2LNdWB_register_Asm_16:
5744  case ARM::VST2LNdWB_register_Asm_32:
5745  case ARM::VST2LNqWB_register_Asm_16:
5746  case ARM::VST2LNqWB_register_Asm_32: {
5747    MCInst TmpInst;
5748    // Shuffle the operands around so the lane index operand is in the
5749    // right place.
5750    unsigned Spacing;
5751    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5752    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5753    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5754    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5755    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5756    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5757    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5758                                            Spacing));
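    // Note: forming the next list register as "first + Spacing" relies on the
    // D registers being numbered consecutively in the generated ARM register
    // enum (d0, d1, d2, ... for Spacing == 1; d0, d2, d4, ... for 2).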
5759    TmpInst.addOperand(Inst.getOperand(1)); // lane
5760    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5761    TmpInst.addOperand(Inst.getOperand(6));
5762    Inst = TmpInst;
5763    return true;
5764  }
5765
5766  case ARM::VST3LNdWB_register_Asm_8:
5767  case ARM::VST3LNdWB_register_Asm_16:
5768  case ARM::VST3LNdWB_register_Asm_32:
5769  case ARM::VST3LNqWB_register_Asm_16:
5770  case ARM::VST3LNqWB_register_Asm_32: {
5771    MCInst TmpInst;
5772    // Shuffle the operands around so the lane index operand is in the
5773    // right place.
5774    unsigned Spacing;
5775    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5776    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5777    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5778    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5779    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5780    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5781    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5782                                            Spacing));
5783    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5784                                            Spacing * 2));
5785    TmpInst.addOperand(Inst.getOperand(1)); // lane
5786    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5787    TmpInst.addOperand(Inst.getOperand(6));
5788    Inst = TmpInst;
5789    return true;
5790  }
5791
5792  case ARM::VST4LNdWB_register_Asm_8:
5793  case ARM::VST4LNdWB_register_Asm_16:
5794  case ARM::VST4LNdWB_register_Asm_32:
5795  case ARM::VST4LNqWB_register_Asm_16:
5796  case ARM::VST4LNqWB_register_Asm_32: {
5797    MCInst TmpInst;
5798    // Shuffle the operands around so the lane index operand is in the
5799    // right place.
5800    unsigned Spacing;
5801    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5802    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5803    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5804    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5805    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5806    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5807    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5808                                            Spacing));
5809    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5810                                            Spacing * 2));
5811    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5812                                            Spacing * 3));
5813    TmpInst.addOperand(Inst.getOperand(1)); // lane
5814    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5815    TmpInst.addOperand(Inst.getOperand(6));
5816    Inst = TmpInst;
5817    return true;
5818  }
5819
5820  case ARM::VST1LNdWB_fixed_Asm_8:
5821  case ARM::VST1LNdWB_fixed_Asm_16:
5822  case ARM::VST1LNdWB_fixed_Asm_32: {
5823    MCInst TmpInst;
5824    // Shuffle the operands around so the lane index operand is in the
5825    // right place.
5826    unsigned Spacing;
5827    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5828    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5829    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5830    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5831    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
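    // A zero Rm register selects the fixed-increment writeback form, i.e. the
    // "[Rn]!" syntax where the base is bumped by the transfer size.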
5832    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5833    TmpInst.addOperand(Inst.getOperand(1)); // lane
5834    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5835    TmpInst.addOperand(Inst.getOperand(5));
5836    Inst = TmpInst;
5837    return true;
5838  }
5839
5840  case ARM::VST2LNdWB_fixed_Asm_8:
5841  case ARM::VST2LNdWB_fixed_Asm_16:
5842  case ARM::VST2LNdWB_fixed_Asm_32:
5843  case ARM::VST2LNqWB_fixed_Asm_16:
5844  case ARM::VST2LNqWB_fixed_Asm_32: {
5845    MCInst TmpInst;
5846    // Shuffle the operands around so the lane index operand is in the
5847    // right place.
5848    unsigned Spacing;
5849    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5850    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5851    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5852    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5853    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5854    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5855    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5856                                            Spacing));
5857    TmpInst.addOperand(Inst.getOperand(1)); // lane
5858    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5859    TmpInst.addOperand(Inst.getOperand(5));
5860    Inst = TmpInst;
5861    return true;
5862  }
5863
5864  case ARM::VST3LNdWB_fixed_Asm_8:
5865  case ARM::VST3LNdWB_fixed_Asm_16:
5866  case ARM::VST3LNdWB_fixed_Asm_32:
5867  case ARM::VST3LNqWB_fixed_Asm_16:
5868  case ARM::VST3LNqWB_fixed_Asm_32: {
5869    MCInst TmpInst;
5870    // Shuffle the operands around so the lane index operand is in the
5871    // right place.
5872    unsigned Spacing;
5873    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5874    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5875    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5876    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5877    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5878    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5879    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5880                                            Spacing));
5881    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5882                                            Spacing * 2));
5883    TmpInst.addOperand(Inst.getOperand(1)); // lane
5884    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5885    TmpInst.addOperand(Inst.getOperand(5));
5886    Inst = TmpInst;
5887    return true;
5888  }
5889
5890  case ARM::VST4LNdWB_fixed_Asm_8:
5891  case ARM::VST4LNdWB_fixed_Asm_16:
5892  case ARM::VST4LNdWB_fixed_Asm_32:
5893  case ARM::VST4LNqWB_fixed_Asm_16:
5894  case ARM::VST4LNqWB_fixed_Asm_32: {
5895    MCInst TmpInst;
5896    // Shuffle the operands around so the lane index operand is in the
5897    // right place.
5898    unsigned Spacing;
5899    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5900    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5901    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5902    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5903    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5904    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5905    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5906                                            Spacing));
5907    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5908                                            Spacing * 2));
5909    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5910                                            Spacing * 3));
5911    TmpInst.addOperand(Inst.getOperand(1)); // lane
5912    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5913    TmpInst.addOperand(Inst.getOperand(5));
5914    Inst = TmpInst;
5915    return true;
5916  }
5917
5918  case ARM::VST1LNdAsm_8:
5919  case ARM::VST1LNdAsm_16:
5920  case ARM::VST1LNdAsm_32: {
5921    MCInst TmpInst;
5922    // Shuffle the operands around so the lane index operand is in the
5923    // right place.
5924    unsigned Spacing;
5925    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5926    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5927    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5928    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5929    TmpInst.addOperand(Inst.getOperand(1)); // lane
5930    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5931    TmpInst.addOperand(Inst.getOperand(5));
5932    Inst = TmpInst;
5933    return true;
5934  }
5935
5936  case ARM::VST2LNdAsm_8:
5937  case ARM::VST2LNdAsm_16:
5938  case ARM::VST2LNdAsm_32:
5939  case ARM::VST2LNqAsm_16:
5940  case ARM::VST2LNqAsm_32: {
5941    MCInst TmpInst;
5942    // Shuffle the operands around so the lane index operand is in the
5943    // right place.
5944    unsigned Spacing;
5945    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5946    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5947    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5948    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5949    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5950                                            Spacing));
5951    TmpInst.addOperand(Inst.getOperand(1)); // lane
5952    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5953    TmpInst.addOperand(Inst.getOperand(5));
5954    Inst = TmpInst;
5955    return true;
5956  }
5957
5958  case ARM::VST3LNdAsm_8:
5959  case ARM::VST3LNdAsm_16:
5960  case ARM::VST3LNdAsm_32:
5961  case ARM::VST3LNqAsm_16:
5962  case ARM::VST3LNqAsm_32: {
5963    MCInst TmpInst;
5964    // Shuffle the operands around so the lane index operand is in the
5965    // right place.
5966    unsigned Spacing;
5967    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5968    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5969    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5970    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5971    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5972                                            Spacing));
5973    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5974                                            Spacing * 2));
5975    TmpInst.addOperand(Inst.getOperand(1)); // lane
5976    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5977    TmpInst.addOperand(Inst.getOperand(5));
5978    Inst = TmpInst;
5979    return true;
5980  }
5981
5982  case ARM::VST4LNdAsm_8:
5983  case ARM::VST4LNdAsm_16:
5984  case ARM::VST4LNdAsm_32:
5985  case ARM::VST4LNqAsm_16:
5986  case ARM::VST4LNqAsm_32: {
5987    MCInst TmpInst;
5988    // Shuffle the operands around so the lane index operand is in the
5989    // right place.
5990    unsigned Spacing;
5991    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5992    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5993    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5994    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5995    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5996                                            Spacing));
5997    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5998                                            Spacing * 2));
5999    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6000                                            Spacing * 3));
6001    TmpInst.addOperand(Inst.getOperand(1)); // lane
6002    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6003    TmpInst.addOperand(Inst.getOperand(5));
6004    Inst = TmpInst;
6005    return true;
6006  }
6007
6008  // Handle NEON VLD complex aliases.
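  // The load expansions mirror the store ones above, with one addition: a
  // lane load only replaces part of each vector register, so the original
  // registers are also passed through as tied source operands.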
6009  case ARM::VLD1LNdWB_register_Asm_8:
6010  case ARM::VLD1LNdWB_register_Asm_16:
6011  case ARM::VLD1LNdWB_register_Asm_32: {
6012    MCInst TmpInst;
6013    // Shuffle the operands around so the lane index operand is in the
6014    // right place.
6015    unsigned Spacing;
6016    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6017    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6018    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6019    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6020    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6021    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6022    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6023    TmpInst.addOperand(Inst.getOperand(1)); // lane
6024    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6025    TmpInst.addOperand(Inst.getOperand(6));
6026    Inst = TmpInst;
6027    return true;
6028  }
6029
6030  case ARM::VLD2LNdWB_register_Asm_8:
6031  case ARM::VLD2LNdWB_register_Asm_16:
6032  case ARM::VLD2LNdWB_register_Asm_32:
6033  case ARM::VLD2LNqWB_register_Asm_16:
6034  case ARM::VLD2LNqWB_register_Asm_32: {
6035    MCInst TmpInst;
6036    // Shuffle the operands around so the lane index operand is in the
6037    // right place.
6038    unsigned Spacing;
6039    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6040    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6041    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6042                                            Spacing));
6043    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6044    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6045    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6046    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6047    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6048    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6049                                            Spacing));
6050    TmpInst.addOperand(Inst.getOperand(1)); // lane
6051    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6052    TmpInst.addOperand(Inst.getOperand(6));
6053    Inst = TmpInst;
6054    return true;
6055  }
6056
6057  case ARM::VLD3LNdWB_register_Asm_8:
6058  case ARM::VLD3LNdWB_register_Asm_16:
6059  case ARM::VLD3LNdWB_register_Asm_32:
6060  case ARM::VLD3LNqWB_register_Asm_16:
6061  case ARM::VLD3LNqWB_register_Asm_32: {
6062    MCInst TmpInst;
6063    // Shuffle the operands around so the lane index operand is in the
6064    // right place.
6065    unsigned Spacing;
6066    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6067    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6068    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6069                                            Spacing));
6070    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6071                                            Spacing * 2));
6072    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6073    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6074    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6075    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6076    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6077    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6078                                            Spacing));
6079    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6080                                            Spacing * 2));
6081    TmpInst.addOperand(Inst.getOperand(1)); // lane
6082    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6083    TmpInst.addOperand(Inst.getOperand(6));
6084    Inst = TmpInst;
6085    return true;
6086  }
6087
6088  case ARM::VLD4LNdWB_register_Asm_8:
6089  case ARM::VLD4LNdWB_register_Asm_16:
6090  case ARM::VLD4LNdWB_register_Asm_32:
6091  case ARM::VLD4LNqWB_register_Asm_16:
6092  case ARM::VLD4LNqWB_register_Asm_32: {
6093    MCInst TmpInst;
6094    // Shuffle the operands around so the lane index operand is in the
6095    // right place.
6096    unsigned Spacing;
6097    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6098    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6099    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6100                                            Spacing));
6101    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6102                                            Spacing * 2));
6103    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6104                                            Spacing * 3));
6105    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6106    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6107    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6108    TmpInst.addOperand(Inst.getOperand(4)); // Rm
6109    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6110    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6111                                            Spacing));
6112    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6113                                            Spacing * 2));
6114    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6115                                            Spacing * 3));
6116    TmpInst.addOperand(Inst.getOperand(1)); // lane
6117    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
6118    TmpInst.addOperand(Inst.getOperand(6));
6119    Inst = TmpInst;
6120    return true;
6121  }
6122
6123  case ARM::VLD1LNdWB_fixed_Asm_8:
6124  case ARM::VLD1LNdWB_fixed_Asm_16:
6125  case ARM::VLD1LNdWB_fixed_Asm_32: {
6126    MCInst TmpInst;
6127    // Shuffle the operands around so the lane index operand is in the
6128    // right place.
6129    unsigned Spacing;
6130    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6131    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6132    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6133    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6134    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6135    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6136    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6137    TmpInst.addOperand(Inst.getOperand(1)); // lane
6138    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6139    TmpInst.addOperand(Inst.getOperand(5));
6140    Inst = TmpInst;
6141    return true;
6142  }
6143
6144  case ARM::VLD2LNdWB_fixed_Asm_8:
6145  case ARM::VLD2LNdWB_fixed_Asm_16:
6146  case ARM::VLD2LNdWB_fixed_Asm_32:
6147  case ARM::VLD2LNqWB_fixed_Asm_16:
6148  case ARM::VLD2LNqWB_fixed_Asm_32: {
6149    MCInst TmpInst;
6150    // Shuffle the operands around so the lane index operand is in the
6151    // right place.
6152    unsigned Spacing;
6153    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6154    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6155    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6156                                            Spacing));
6157    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6158    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6159    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6160    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6161    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6162    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6163                                            Spacing));
6164    TmpInst.addOperand(Inst.getOperand(1)); // lane
6165    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6166    TmpInst.addOperand(Inst.getOperand(5));
6167    Inst = TmpInst;
6168    return true;
6169  }
6170
6171  case ARM::VLD3LNdWB_fixed_Asm_8:
6172  case ARM::VLD3LNdWB_fixed_Asm_16:
6173  case ARM::VLD3LNdWB_fixed_Asm_32:
6174  case ARM::VLD3LNqWB_fixed_Asm_16:
6175  case ARM::VLD3LNqWB_fixed_Asm_32: {
6176    MCInst TmpInst;
6177    // Shuffle the operands around so the lane index operand is in the
6178    // right place.
6179    unsigned Spacing;
6180    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6181    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6182    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6183                                            Spacing));
6184    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6185                                            Spacing * 2));
6186    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6187    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6188    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6189    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6190    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6191    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6192                                            Spacing));
6193    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6194                                            Spacing * 2));
6195    TmpInst.addOperand(Inst.getOperand(1)); // lane
6196    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6197    TmpInst.addOperand(Inst.getOperand(5));
6198    Inst = TmpInst;
6199    return true;
6200  }
6201
6202  case ARM::VLD4LNdWB_fixed_Asm_8:
6203  case ARM::VLD4LNdWB_fixed_Asm_16:
6204  case ARM::VLD4LNdWB_fixed_Asm_32:
6205  case ARM::VLD4LNqWB_fixed_Asm_16:
6206  case ARM::VLD4LNqWB_fixed_Asm_32: {
6207    MCInst TmpInst;
6208    // Shuffle the operands around so the lane index operand is in the
6209    // right place.
6210    unsigned Spacing;
6211    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6212    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6213    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6214                                            Spacing));
6215    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6216                                            Spacing * 2));
6217    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6218                                            Spacing * 3));
6219    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6220    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6221    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6222    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6223    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6224    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6225                                            Spacing));
6226    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6227                                            Spacing * 2));
6228    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6229                                            Spacing * 3));
6230    TmpInst.addOperand(Inst.getOperand(1)); // lane
6231    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6232    TmpInst.addOperand(Inst.getOperand(5));
6233    Inst = TmpInst;
6234    return true;
6235  }
6236
6237  case ARM::VLD1LNdAsm_8:
6238  case ARM::VLD1LNdAsm_16:
6239  case ARM::VLD1LNdAsm_32: {
6240    MCInst TmpInst;
6241    // Shuffle the operands around so the lane index operand is in the
6242    // right place.
6243    unsigned Spacing;
6244    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6245    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6246    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6247    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6248    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6249    TmpInst.addOperand(Inst.getOperand(1)); // lane
6250    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6251    TmpInst.addOperand(Inst.getOperand(5));
6252    Inst = TmpInst;
6253    return true;
6254  }
6255
6256  case ARM::VLD2LNdAsm_8:
6257  case ARM::VLD2LNdAsm_16:
6258  case ARM::VLD2LNdAsm_32:
6259  case ARM::VLD2LNqAsm_16:
6260  case ARM::VLD2LNqAsm_32: {
6261    MCInst TmpInst;
6262    // Shuffle the operands around so the lane index operand is in the
6263    // right place.
6264    unsigned Spacing;
6265    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6266    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6267    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6268                                            Spacing));
6269    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6270    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6271    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6272    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6273                                            Spacing));
6274    TmpInst.addOperand(Inst.getOperand(1)); // lane
6275    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6276    TmpInst.addOperand(Inst.getOperand(5));
6277    Inst = TmpInst;
6278    return true;
6279  }
6280
6281  case ARM::VLD3LNdAsm_8:
6282  case ARM::VLD3LNdAsm_16:
6283  case ARM::VLD3LNdAsm_32:
6284  case ARM::VLD3LNqAsm_16:
6285  case ARM::VLD3LNqAsm_32: {
6286    MCInst TmpInst;
6287    // Shuffle the operands around so the lane index operand is in the
6288    // right place.
6289    unsigned Spacing;
6290    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6291    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6292    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6293                                            Spacing));
6294    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6295                                            Spacing * 2));
6296    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6297    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6298    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6299    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6300                                            Spacing));
6301    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6302                                            Spacing * 2));
6303    TmpInst.addOperand(Inst.getOperand(1)); // lane
6304    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6305    TmpInst.addOperand(Inst.getOperand(5));
6306    Inst = TmpInst;
6307    return true;
6308  }
6309
6310  case ARM::VLD4LNdAsm_8:
6311  case ARM::VLD4LNdAsm_16:
6312  case ARM::VLD4LNdAsm_32:
6313  case ARM::VLD4LNqAsm_16:
6314  case ARM::VLD4LNqAsm_32: {
6315    MCInst TmpInst;
6316    // Shuffle the operands around so the lane index operand is in the
6317    // right place.
6318    unsigned Spacing;
6319    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6320    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6321    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6322                                            Spacing));
6323    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6324                                            Spacing * 2));
6325    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6326                                            Spacing * 3));
6327    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6328    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6329    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6330    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6331                                            Spacing));
6332    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6333                                            Spacing * 2));
6334    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6335                                            Spacing * 3));
6336    TmpInst.addOperand(Inst.getOperand(1)); // lane
6337    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6338    TmpInst.addOperand(Inst.getOperand(5));
6339    Inst = TmpInst;
6340    return true;
6341  }
6342
6343  // VLD3DUP single 3-element structure to all lanes instructions.
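  // e.g. "vld3.8 {d0[], d1[], d2[]}, [r1]" (d form, Spacing 1) or
  // "vld3.16 {d0[], d2[], d4[]}, [r1]" (q form, Spacing 2).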
6344  case ARM::VLD3DUPdAsm_8:
6345  case ARM::VLD3DUPdAsm_16:
6346  case ARM::VLD3DUPdAsm_32:
6347  case ARM::VLD3DUPqAsm_8:
6348  case ARM::VLD3DUPqAsm_16:
6349  case ARM::VLD3DUPqAsm_32: {
6350    MCInst TmpInst;
6351    unsigned Spacing;
6352    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6353    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6354    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6355                                            Spacing));
6356    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6357                                            Spacing * 2));
6358    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6359    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6360    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6361    TmpInst.addOperand(Inst.getOperand(4));
6362    Inst = TmpInst;
6363    return true;
6364  }
6365
6366  case ARM::VLD3DUPdWB_fixed_Asm_8:
6367  case ARM::VLD3DUPdWB_fixed_Asm_16:
6368  case ARM::VLD3DUPdWB_fixed_Asm_32:
6369  case ARM::VLD3DUPqWB_fixed_Asm_8:
6370  case ARM::VLD3DUPqWB_fixed_Asm_16:
6371  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6372    MCInst TmpInst;
6373    unsigned Spacing;
6374    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6375    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6376    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6377                                            Spacing));
6378    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6379                                            Spacing * 2));
6380    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6381    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6382    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6383    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6384    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6385    TmpInst.addOperand(Inst.getOperand(4));
6386    Inst = TmpInst;
6387    return true;
6388  }
6389
6390  case ARM::VLD3DUPdWB_register_Asm_8:
6391  case ARM::VLD3DUPdWB_register_Asm_16:
6392  case ARM::VLD3DUPdWB_register_Asm_32:
6393  case ARM::VLD3DUPqWB_register_Asm_8:
6394  case ARM::VLD3DUPqWB_register_Asm_16:
6395  case ARM::VLD3DUPqWB_register_Asm_32: {
6396    MCInst TmpInst;
6397    unsigned Spacing;
6398    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6399    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6400    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6401                                            Spacing));
6402    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6403                                            Spacing * 2));
6404    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6405    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6406    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6407    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6408    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6409    TmpInst.addOperand(Inst.getOperand(5));
6410    Inst = TmpInst;
6411    return true;
6412  }
6413
6414  // VLD3 multiple 3-element structure instructions.
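  // e.g. "vld3.8 {d0, d1, d2}, [r1]" (d form) or
  // "vld3.16 {d0, d2, d4}, [r1]" (q form, every other D register).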
6415  case ARM::VLD3dAsm_8:
6416  case ARM::VLD3dAsm_16:
6417  case ARM::VLD3dAsm_32:
6418  case ARM::VLD3qAsm_8:
6419  case ARM::VLD3qAsm_16:
6420  case ARM::VLD3qAsm_32: {
6421    MCInst TmpInst;
6422    unsigned Spacing;
6423    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6424    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6425    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6426                                            Spacing));
6427    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6428                                            Spacing * 2));
6429    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6430    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6431    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6432    TmpInst.addOperand(Inst.getOperand(4));
6433    Inst = TmpInst;
6434    return true;
6435  }
6436
6437  case ARM::VLD3dWB_fixed_Asm_8:
6438  case ARM::VLD3dWB_fixed_Asm_16:
6439  case ARM::VLD3dWB_fixed_Asm_32:
6440  case ARM::VLD3qWB_fixed_Asm_8:
6441  case ARM::VLD3qWB_fixed_Asm_16:
6442  case ARM::VLD3qWB_fixed_Asm_32: {
6443    MCInst TmpInst;
6444    unsigned Spacing;
6445    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6446    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6447    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6448                                            Spacing));
6449    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6450                                            Spacing * 2));
6451    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6452    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6453    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6454    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6455    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6456    TmpInst.addOperand(Inst.getOperand(4));
6457    Inst = TmpInst;
6458    return true;
6459  }
6460
6461  case ARM::VLD3dWB_register_Asm_8:
6462  case ARM::VLD3dWB_register_Asm_16:
6463  case ARM::VLD3dWB_register_Asm_32:
6464  case ARM::VLD3qWB_register_Asm_8:
6465  case ARM::VLD3qWB_register_Asm_16:
6466  case ARM::VLD3qWB_register_Asm_32: {
6467    MCInst TmpInst;
6468    unsigned Spacing;
6469    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6470    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6471    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6472                                            Spacing));
6473    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6474                                            Spacing * 2));
6475    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6476    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6477    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6478    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6479    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6480    TmpInst.addOperand(Inst.getOperand(5));
6481    Inst = TmpInst;
6482    return true;
6483  }
6484
6485  // VLD4DUP single 4-element structure to all lanes instructions.
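  // e.g. "vld4.8 {d0[], d1[], d2[], d3[]}, [r1]" or
  // "vld4.16 {d0[], d2[], d4[], d6[]}, [r1]".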
6486  case ARM::VLD4DUPdAsm_8:
6487  case ARM::VLD4DUPdAsm_16:
6488  case ARM::VLD4DUPdAsm_32:
6489  case ARM::VLD4DUPqAsm_8:
6490  case ARM::VLD4DUPqAsm_16:
6491  case ARM::VLD4DUPqAsm_32: {
6492    MCInst TmpInst;
6493    unsigned Spacing;
6494    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6495    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6496    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6497                                            Spacing));
6498    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6499                                            Spacing * 2));
6500    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6501                                            Spacing * 3));
6502    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6503    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6504    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6505    TmpInst.addOperand(Inst.getOperand(4));
6506    Inst = TmpInst;
6507    return true;
6508  }
6509
6510  case ARM::VLD4DUPdWB_fixed_Asm_8:
6511  case ARM::VLD4DUPdWB_fixed_Asm_16:
6512  case ARM::VLD4DUPdWB_fixed_Asm_32:
6513  case ARM::VLD4DUPqWB_fixed_Asm_8:
6514  case ARM::VLD4DUPqWB_fixed_Asm_16:
6515  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6516    MCInst TmpInst;
6517    unsigned Spacing;
6518    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6519    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6520    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6521                                            Spacing));
6522    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6523                                            Spacing * 2));
6524    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6525                                            Spacing * 3));
6526    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6527    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6528    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6529    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6530    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6531    TmpInst.addOperand(Inst.getOperand(4));
6532    Inst = TmpInst;
6533    return true;
6534  }
6535
6536  case ARM::VLD4DUPdWB_register_Asm_8:
6537  case ARM::VLD4DUPdWB_register_Asm_16:
6538  case ARM::VLD4DUPdWB_register_Asm_32:
6539  case ARM::VLD4DUPqWB_register_Asm_8:
6540  case ARM::VLD4DUPqWB_register_Asm_16:
6541  case ARM::VLD4DUPqWB_register_Asm_32: {
6542    MCInst TmpInst;
6543    unsigned Spacing;
6544    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6545    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6546    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6547                                            Spacing));
6548    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6549                                            Spacing * 2));
6550    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6551                                            Spacing * 3));
6552    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6553    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6554    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6555    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6556    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6557    TmpInst.addOperand(Inst.getOperand(5));
6558    Inst = TmpInst;
6559    return true;
6560  }
6561
6562  // VLD4 multiple 4-element structure instructions.
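  // e.g. "vld4.8 {d0, d1, d2, d3}, [r1]" or "vld4.16 {d0, d2, d4, d6}, [r1]".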
6563  case ARM::VLD4dAsm_8:
6564  case ARM::VLD4dAsm_16:
6565  case ARM::VLD4dAsm_32:
6566  case ARM::VLD4qAsm_8:
6567  case ARM::VLD4qAsm_16:
6568  case ARM::VLD4qAsm_32: {
6569    MCInst TmpInst;
6570    unsigned Spacing;
6571    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6572    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6573    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6574                                            Spacing));
6575    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6576                                            Spacing * 2));
6577    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6578                                            Spacing * 3));
6579    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6580    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6581    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6582    TmpInst.addOperand(Inst.getOperand(4));
6583    Inst = TmpInst;
6584    return true;
6585  }
6586
6587  case ARM::VLD4dWB_fixed_Asm_8:
6588  case ARM::VLD4dWB_fixed_Asm_16:
6589  case ARM::VLD4dWB_fixed_Asm_32:
6590  case ARM::VLD4qWB_fixed_Asm_8:
6591  case ARM::VLD4qWB_fixed_Asm_16:
6592  case ARM::VLD4qWB_fixed_Asm_32: {
6593    MCInst TmpInst;
6594    unsigned Spacing;
6595    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6596    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6597    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6598                                            Spacing));
6599    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6600                                            Spacing * 2));
6601    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6602                                            Spacing * 3));
6603    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6604    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6605    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6606    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6607    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6608    TmpInst.addOperand(Inst.getOperand(4));
6609    Inst = TmpInst;
6610    return true;
6611  }
6612
6613  case ARM::VLD4dWB_register_Asm_8:
6614  case ARM::VLD4dWB_register_Asm_16:
6615  case ARM::VLD4dWB_register_Asm_32:
6616  case ARM::VLD4qWB_register_Asm_8:
6617  case ARM::VLD4qWB_register_Asm_16:
6618  case ARM::VLD4qWB_register_Asm_32: {
6619    MCInst TmpInst;
6620    unsigned Spacing;
6621    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6622    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6623    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6624                                            Spacing));
6625    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6626                                            Spacing * 2));
6627    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6628                                            Spacing * 3));
6629    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6630    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6631    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6632    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6633    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6634    TmpInst.addOperand(Inst.getOperand(5));
6635    Inst = TmpInst;
6636    return true;
6637  }
6638
6639  // VST3 multiple 3-element structure instructions.
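  // e.g. "vst3.8 {d0, d1, d2}, [r1]" or "vst3.16 {d0, d2, d4}, [r1]".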
6640  case ARM::VST3dAsm_8:
6641  case ARM::VST3dAsm_16:
6642  case ARM::VST3dAsm_32:
6643  case ARM::VST3qAsm_8:
6644  case ARM::VST3qAsm_16:
6645  case ARM::VST3qAsm_32: {
6646    MCInst TmpInst;
6647    unsigned Spacing;
6648    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6649    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6650    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6651    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6652    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6653                                            Spacing));
6654    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6655                                            Spacing * 2));
6656    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6657    TmpInst.addOperand(Inst.getOperand(4));
6658    Inst = TmpInst;
6659    return true;
6660  }
6661
6662  case ARM::VST3dWB_fixed_Asm_8:
6663  case ARM::VST3dWB_fixed_Asm_16:
6664  case ARM::VST3dWB_fixed_Asm_32:
6665  case ARM::VST3qWB_fixed_Asm_8:
6666  case ARM::VST3qWB_fixed_Asm_16:
6667  case ARM::VST3qWB_fixed_Asm_32: {
6668    MCInst TmpInst;
6669    unsigned Spacing;
6670    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6671    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6672    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6673    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6674    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6675    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6676    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6677                                            Spacing));
6678    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6679                                            Spacing * 2));
6680    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6681    TmpInst.addOperand(Inst.getOperand(4));
6682    Inst = TmpInst;
6683    return true;
6684  }
6685
6686  case ARM::VST3dWB_register_Asm_8:
6687  case ARM::VST3dWB_register_Asm_16:
6688  case ARM::VST3dWB_register_Asm_32:
6689  case ARM::VST3qWB_register_Asm_8:
6690  case ARM::VST3qWB_register_Asm_16:
6691  case ARM::VST3qWB_register_Asm_32: {
6692    MCInst TmpInst;
6693    unsigned Spacing;
6694    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6695    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6696    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6697    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6698    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6699    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6700    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6701                                            Spacing));
6702    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6703                                            Spacing * 2));
6704    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6705    TmpInst.addOperand(Inst.getOperand(5));
6706    Inst = TmpInst;
6707    return true;
6708  }
6709
6710  // VST4 multiple 4-element structure instructions.
6711  case ARM::VST4dAsm_8:
6712  case ARM::VST4dAsm_16:
6713  case ARM::VST4dAsm_32:
6714  case ARM::VST4qAsm_8:
6715  case ARM::VST4qAsm_16:
6716  case ARM::VST4qAsm_32: {
6717    MCInst TmpInst;
6718    unsigned Spacing;
6719    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6720    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6721    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6722    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6723    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6724                                            Spacing));
6725    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6726                                            Spacing * 2));
6727    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6728                                            Spacing * 3));
6729    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6730    TmpInst.addOperand(Inst.getOperand(4));
6731    Inst = TmpInst;
6732    return true;
6733  }
6734
6735  case ARM::VST4dWB_fixed_Asm_8:
6736  case ARM::VST4dWB_fixed_Asm_16:
6737  case ARM::VST4dWB_fixed_Asm_32:
6738  case ARM::VST4qWB_fixed_Asm_8:
6739  case ARM::VST4qWB_fixed_Asm_16:
6740  case ARM::VST4qWB_fixed_Asm_32: {
6741    MCInst TmpInst;
6742    unsigned Spacing;
6743    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6744    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6745    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6746    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6747    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6748    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6749    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6750                                            Spacing));
6751    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6752                                            Spacing * 2));
6753    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6754                                            Spacing * 3));
6755    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6756    TmpInst.addOperand(Inst.getOperand(4));
6757    Inst = TmpInst;
6758    return true;
6759  }
6760
6761  case ARM::VST4dWB_register_Asm_8:
6762  case ARM::VST4dWB_register_Asm_16:
6763  case ARM::VST4dWB_register_Asm_32:
6764  case ARM::VST4qWB_register_Asm_8:
6765  case ARM::VST4qWB_register_Asm_16:
6766  case ARM::VST4qWB_register_Asm_32: {
6767    MCInst TmpInst;
6768    unsigned Spacing;
6769    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6770    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6771    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6772    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6773    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6774    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6775    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6776                                            Spacing));
6777    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6778                                            Spacing * 2));
6779    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6780                                            Spacing * 3));
6781    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6782    TmpInst.addOperand(Inst.getOperand(5));
6783    Inst = TmpInst;
6784    return true;
6785  }
6786
6787  // Handle encoding choice for the shift-immediate instructions.
6788  case ARM::t2LSLri:
6789  case ARM::t2LSRri:
6790  case ARM::t2ASRri: {
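        // Prefer the 16-bit Thumb1 shift when the destination is a low
        // register tied to the source, the flag-setting behaviour matches
        // the IT context, and the wide '.w' qualifier wasn't given.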
6791    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6792        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6793        Inst.getOperand(5).getReg() == (inITBlock() ? 0 : ARM::CPSR) &&
6794        !(static_cast<ARMOperand*>(Operands[3])->isToken() &&
6795         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w")) {
6796      unsigned NewOpc;
6797      switch (Inst.getOpcode()) {
6798      default: llvm_unreachable("unexpected opcode");
6799      case ARM::t2LSLri: NewOpc = ARM::tLSLri; break;
6800      case ARM::t2LSRri: NewOpc = ARM::tLSRri; break;
6801      case ARM::t2ASRri: NewOpc = ARM::tASRri; break;
6802      }
6803      // The Thumb1 operands aren't in the same order. Awesome, eh?
6804      MCInst TmpInst;
6805      TmpInst.setOpcode(NewOpc);
6806      TmpInst.addOperand(Inst.getOperand(0));
6807      TmpInst.addOperand(Inst.getOperand(5));
6808      TmpInst.addOperand(Inst.getOperand(1));
6809      TmpInst.addOperand(Inst.getOperand(2));
6810      TmpInst.addOperand(Inst.getOperand(3));
6811      TmpInst.addOperand(Inst.getOperand(4));
6812      Inst = TmpInst;
6813      return true;
6814    }
6815    return false;
6816  }
6817
6818  // Handle the Thumb2 mode MOV complex aliases.
6819  case ARM::t2MOVsr:
6820  case ARM::t2MOVSsr: {
6821    // Which instruction to expand to depends on whether the register
6822    // operands are low registers, on the CCOut operand, and on whether
6823    // we're in an IT block.
6824    bool isNarrow = false;
6825    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6826        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6827        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6828        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6829        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6830      isNarrow = true;
6831    MCInst TmpInst;
6832    unsigned newOpc;
6833    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6834    default: llvm_unreachable("unexpected opcode!");
6835    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6836    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6837    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6838    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6839    }
6840    TmpInst.setOpcode(newOpc);
6841    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6842    if (isNarrow)
6843      TmpInst.addOperand(MCOperand::CreateReg(
6844          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6845    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6846    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6847    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6848    TmpInst.addOperand(Inst.getOperand(5));
6849    if (!isNarrow)
6850      TmpInst.addOperand(MCOperand::CreateReg(
6851          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6852    Inst = TmpInst;
6853    return true;
6854  }
6855  case ARM::t2MOVsi:
6856  case ARM::t2MOVSsi: {
6857    // Which instruction to expand to depends on whether the register
6858    // operands are low registers, on the CCOut operand, and on whether
6859    // we're in an IT block.
6860    bool isNarrow = false;
6861    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6862        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6863        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6864      isNarrow = true;
6865    MCInst TmpInst;
6866    unsigned newOpc;
6867    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6868    default: llvm_unreachable("unexpected opcode!");
6869    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6870    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6871    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6872    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6873    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6874    }
6875    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6876    if (Amount == 32) Amount = 0;
6877    TmpInst.setOpcode(newOpc);
6878    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6879    if (isNarrow)
6880      TmpInst.addOperand(MCOperand::CreateReg(
6881          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6882    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6883    if (newOpc != ARM::t2RRX)
6884      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6885    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6886    TmpInst.addOperand(Inst.getOperand(4));
6887    if (!isNarrow)
6888      TmpInst.addOperand(MCOperand::CreateReg(
6889          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6890    Inst = TmpInst;
6891    return true;
6892  }
6893  // Handle the ARM mode MOV complex aliases.
6894  case ARM::ASRr:
6895  case ARM::LSRr:
6896  case ARM::LSLr:
6897  case ARM::RORr: {
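        // These mnemonics are aliases for MOVsr with the corresponding
        // register-shift operand.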
6898    ARM_AM::ShiftOpc ShiftTy;
6899    switch(Inst.getOpcode()) {
6900    default: llvm_unreachable("unexpected opcode!");
6901    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6902    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6903    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6904    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6905    }
6906    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6907    MCInst TmpInst;
6908    TmpInst.setOpcode(ARM::MOVsr);
6909    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6910    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6911    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6912    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6913    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6914    TmpInst.addOperand(Inst.getOperand(4));
6915    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6916    Inst = TmpInst;
6917    return true;
6918  }
6919  case ARM::ASRi:
6920  case ARM::LSRi:
6921  case ARM::LSLi:
6922  case ARM::RORi: {
6923    ARM_AM::ShiftOpc ShiftTy;
6924    switch(Inst.getOpcode()) {
6925    default: llvm_unreachable("unexpected opcode!");
6926    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6927    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6928    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6929    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6930    }
6931    // A shift by zero is a plain MOVr, not a MOVsi.
6932    unsigned Amt = Inst.getOperand(2).getImm();
6933    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6934    // A shift by 32 should be encoded as 0 when permitted
6935    if (Amt == 32 && (ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr))
6936      Amt = 0;
6937    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6938    MCInst TmpInst;
6939    TmpInst.setOpcode(Opc);
6940    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6941    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6942    if (Opc == ARM::MOVsi)
6943      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6944    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6945    TmpInst.addOperand(Inst.getOperand(4));
6946    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6947    Inst = TmpInst;
6948    return true;
6949  }
6950  case ARM::RRXi: {
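        // RRX is an alias for MOVsi with an 'rrx' shift operand (amount 0).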
6951    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6952    MCInst TmpInst;
6953    TmpInst.setOpcode(ARM::MOVsi);
6954    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6955    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6956    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6957    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6958    TmpInst.addOperand(Inst.getOperand(3));
6959    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6960    Inst = TmpInst;
6961    return true;
6962  }
6963  case ARM::t2LDMIA_UPD: {
6964    // If this is a load of a single register, then we should use
6965    // a post-indexed LDR instruction instead, per the ARM ARM.
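        // Five operands = writeback reg, base reg, the two predicate operands,
        // and exactly one register in the list.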
6966    if (Inst.getNumOperands() != 5)
6967      return false;
6968    MCInst TmpInst;
6969    TmpInst.setOpcode(ARM::t2LDR_POST);
6970    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6971    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6972    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6973    TmpInst.addOperand(MCOperand::CreateImm(4));
6974    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6975    TmpInst.addOperand(Inst.getOperand(3));
6976    Inst = TmpInst;
6977    return true;
6978  }
6979  case ARM::t2STMDB_UPD: {
6980    // If this is a store of a single register, then we should use
6981    // a pre-indexed STR instruction instead, per the ARM ARM.
6982    if (Inst.getNumOperands() != 5)
6983      return false;
6984    MCInst TmpInst;
6985    TmpInst.setOpcode(ARM::t2STR_PRE);
6986    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6987    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6988    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6989    TmpInst.addOperand(MCOperand::CreateImm(-4));
6990    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6991    TmpInst.addOperand(Inst.getOperand(3));
6992    Inst = TmpInst;
6993    return true;
6994  }
6995  case ARM::LDMIA_UPD:
6996    // If this is a load of a single register via a 'pop', then we should use
6997    // a post-indexed LDR instruction instead, per the ARM ARM.
6998    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6999        Inst.getNumOperands() == 5) {
7000      MCInst TmpInst;
7001      TmpInst.setOpcode(ARM::LDR_POST_IMM);
7002      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7003      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7004      TmpInst.addOperand(Inst.getOperand(1)); // Rn
7005      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
7006      TmpInst.addOperand(MCOperand::CreateImm(4));
7007      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7008      TmpInst.addOperand(Inst.getOperand(3));
7009      Inst = TmpInst;
7010      return true;
7011    }
7012    break;
7013  case ARM::STMDB_UPD:
7014    // If this is a store of a single register via a 'push', then we should use
7015    // a pre-indexed STR instruction instead, per the ARM ARM.
7016    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
7017        Inst.getNumOperands() == 5) {
7018      MCInst TmpInst;
7019      TmpInst.setOpcode(ARM::STR_PRE_IMM);
7020      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
7021      TmpInst.addOperand(Inst.getOperand(4)); // Rt
7022      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
7023      TmpInst.addOperand(MCOperand::CreateImm(-4));
7024      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
7025      TmpInst.addOperand(Inst.getOperand(3));
7026      Inst = TmpInst;
7027    }
7028    break;
7029  case ARM::t2ADDri12:
7030    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
7031    // mnemonic was used (not "addw"), encoding T3 is preferred.
7032    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
7033        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7034      break;
7035    Inst.setOpcode(ARM::t2ADDri);
7036    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7037    break;
7038  case ARM::t2SUBri12:
7039    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
7040    // mnemonic was used (not "subw"), encoding T3 is preferred.
7041    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
7042        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
7043      break;
7044    Inst.setOpcode(ARM::t2SUBri);
7045    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7046    break;
7047  case ARM::tADDi8:
7048    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
7049    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7050    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7051    // to encoding T1 if <Rd> is omitted."
7052    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7053      Inst.setOpcode(ARM::tADDi3);
7054      return true;
7055    }
7056    break;
7057  case ARM::tSUBi8:
7058    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
7059    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
7060    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
7061    // to encoding T1 if <Rd> is omitted."
7062    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
7063      Inst.setOpcode(ARM::tSUBi3);
7064      return true;
7065    }
7066    break;
7067  case ARM::t2ADDri:
7068  case ARM::t2SUBri: {
7069    // If the destination and first source operand are the same, and
7070    // the flags are compatible with the current IT status, use encoding T2
7071    // instead of T3. For compatibility with the system 'as'. Make sure the
7072    // wide encoding wasn't explicit.
7073    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7074        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
7075        (unsigned)Inst.getOperand(2).getImm() > 255 ||
7076        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
7077        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
7078        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7079         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7080      break;
7081    MCInst TmpInst;
7082    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
7083                      ARM::tADDi8 : ARM::tSUBi8);
7084    TmpInst.addOperand(Inst.getOperand(0));
7085    TmpInst.addOperand(Inst.getOperand(5));
7086    TmpInst.addOperand(Inst.getOperand(0));
7087    TmpInst.addOperand(Inst.getOperand(2));
7088    TmpInst.addOperand(Inst.getOperand(3));
7089    TmpInst.addOperand(Inst.getOperand(4));
7090    Inst = TmpInst;
7091    return true;
7092  }
7093  case ARM::t2ADDrr: {
7094    // If the destination and first source operand are the same, and
7095    // there's no setting of the flags, use encoding T2 instead of T3.
7096    // Note that this is only for ADD, not SUB. This mirrors the system
7097    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
7098    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
7099        Inst.getOperand(5).getReg() != 0 ||
7100        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7101         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
7102      break;
7103    MCInst TmpInst;
7104    TmpInst.setOpcode(ARM::tADDhirr);
7105    TmpInst.addOperand(Inst.getOperand(0));
7106    TmpInst.addOperand(Inst.getOperand(0));
7107    TmpInst.addOperand(Inst.getOperand(2));
7108    TmpInst.addOperand(Inst.getOperand(3));
7109    TmpInst.addOperand(Inst.getOperand(4));
7110    Inst = TmpInst;
7111    return true;
7112  }
7113  case ARM::tADDrSP: {
7114    // If the non-SP source operand and the destination operand are not the
7115    // same, we need to use the 32-bit encoding if it's available.
7116    if (Inst.getOperand(0).getReg() != Inst.getOperand(2).getReg()) {
7117      Inst.setOpcode(ARM::t2ADDrr);
7118      Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
7119      return true;
7120    }
7121    break;
7122  }
7123  case ARM::tB:
7124    // A Thumb conditional branch outside of an IT block is a tBcc.
7125    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
7126      Inst.setOpcode(ARM::tBcc);
7127      return true;
7128    }
7129    break;
7130  case ARM::t2B:
7131    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
7132    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
7133      Inst.setOpcode(ARM::t2Bcc);
7134      return true;
7135    }
7136    break;
7137  case ARM::t2Bcc:
7138    // If the conditional is AL or we're in an IT block, we really want t2B.
7139    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
7140      Inst.setOpcode(ARM::t2B);
7141      return true;
7142    }
7143    break;
7144  case ARM::tBcc:
7145    // If the conditional is AL, we really want tB.
7146    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
7147      Inst.setOpcode(ARM::tB);
7148      return true;
7149    }
7150    break;
7151  case ARM::tLDMIA: {
7152    // If the register list contains any high registers, or if the writeback
7153    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
7154    // instead if we're in Thumb2. Otherwise, this should have generated
7155    // an error in validateInstruction().
7156    unsigned Rn = Inst.getOperand(0).getReg();
7157    bool hasWritebackToken =
7158      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
7159       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
7160    bool listContainsBase;
7161    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
7162        (!listContainsBase && !hasWritebackToken) ||
7163        (listContainsBase && hasWritebackToken)) {
7164      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7165      assert(isThumbTwo());
7166      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
7167      // If we're switching to the updating version, we need to insert
7168      // the writeback tied operand.
7169      if (hasWritebackToken)
7170        Inst.insert(Inst.begin(),
7171                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
7172      return true;
7173    }
7174    break;
7175  }
7176  case ARM::tSTMIA_UPD: {
7177    // If the register list contains any high registers, we need to use
7178    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7179    // should have generated an error in validateInstruction().
7180    unsigned Rn = Inst.getOperand(0).getReg();
7181    bool listContainsBase;
7182    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7183      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7184      assert(isThumbTwo());
7185      Inst.setOpcode(ARM::t2STMIA_UPD);
7186      return true;
7187    }
7188    break;
7189  }
7190  case ARM::tPOP: {
7191    bool listContainsBase;
7192    // If the register list contains any high registers, we need to use
7193    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7194    // should have generated an error in validateInstruction().
7195    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7196      return false;
7197    assert(isThumbTwo());
7198    Inst.setOpcode(ARM::t2LDMIA_UPD);
7199    // Add the base register and writeback operands.
7200    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7201    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7202    return true;
7203  }
7204  case ARM::tPUSH: {
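        // As with tPOP above: if the list needs anything other than low
        // registers (LR is allowed), switch to the 32-bit STMDB with SP
        // writeback.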
7205    bool listContainsBase;
7206    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7207      return false;
7208    assert(isThumbTwo());
7209    Inst.setOpcode(ARM::t2STMDB_UPD);
7210    // Add the base register and writeback operands.
7211    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7212    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7213    return true;
7214  }
7215  case ARM::t2MOVi: {
7216    // If we can use the 16-bit encoding and the user didn't explicitly
7217    // request the 32-bit variant, transform it here.
7218    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7219        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7220        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7221         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7222        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7223        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7224         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7225      // The operands aren't in the same order for tMOVi8...
7226      MCInst TmpInst;
7227      TmpInst.setOpcode(ARM::tMOVi8);
7228      TmpInst.addOperand(Inst.getOperand(0));
7229      TmpInst.addOperand(Inst.getOperand(4));
7230      TmpInst.addOperand(Inst.getOperand(1));
7231      TmpInst.addOperand(Inst.getOperand(2));
7232      TmpInst.addOperand(Inst.getOperand(3));
7233      Inst = TmpInst;
7234      return true;
7235    }
7236    break;
7237  }
7238  case ARM::t2MOVr: {
7239    // If we can use the 16-bit encoding and the user didn't explicitly
7240    // request the 32-bit variant, transform it here.
7241    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7242        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7243        Inst.getOperand(2).getImm() == ARMCC::AL &&
7244        Inst.getOperand(4).getReg() == ARM::CPSR &&
7245        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7246         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7247      // The operands aren't the same for tMOV[S]r... (no cc_out)
7248      MCInst TmpInst;
7249      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7250      TmpInst.addOperand(Inst.getOperand(0));
7251      TmpInst.addOperand(Inst.getOperand(1));
7252      TmpInst.addOperand(Inst.getOperand(2));
7253      TmpInst.addOperand(Inst.getOperand(3));
7254      Inst = TmpInst;
7255      return true;
7256    }
7257    break;
7258  }
7259  case ARM::t2SXTH:
7260  case ARM::t2SXTB:
7261  case ARM::t2UXTH:
7262  case ARM::t2UXTB: {
7263    // If we can use the 16-bit encoding and the user didn't explicitly
7264    // request the 32-bit variant, transform it here.
7265    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7266        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7267        Inst.getOperand(2).getImm() == 0 &&
7268        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7269         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7270      unsigned NewOpc;
7271      switch (Inst.getOpcode()) {
7272      default: llvm_unreachable("Illegal opcode!");
7273      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7274      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7275      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7276      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7277      }
7278      // The operands aren't the same for thumb1 (no rotate operand).
7279      MCInst TmpInst;
7280      TmpInst.setOpcode(NewOpc);
7281      TmpInst.addOperand(Inst.getOperand(0));
7282      TmpInst.addOperand(Inst.getOperand(1));
7283      TmpInst.addOperand(Inst.getOperand(3));
7284      TmpInst.addOperand(Inst.getOperand(4));
7285      Inst = TmpInst;
7286      return true;
7287    }
7288    break;
7289  }
7290  case ARM::MOVsi: {
7291    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7292    // rrx shifts and asr/lsr of #32 are encoded as amount 0; not plain moves.
7293    if (SOpc == ARM_AM::rrx || SOpc == ARM_AM::asr || SOpc == ARM_AM::lsr)
7294      return false;
7295    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7296      // Shifting by zero is accepted as a vanilla 'MOVr'
7297      MCInst TmpInst;
7298      TmpInst.setOpcode(ARM::MOVr);
7299      TmpInst.addOperand(Inst.getOperand(0));
7300      TmpInst.addOperand(Inst.getOperand(1));
7301      TmpInst.addOperand(Inst.getOperand(3));
7302      TmpInst.addOperand(Inst.getOperand(4));
7303      TmpInst.addOperand(Inst.getOperand(5));
7304      Inst = TmpInst;
7305      return true;
7306    }
7307    return false;
7308  }
7309  case ARM::ANDrsi:
7310  case ARM::ORRrsi:
7311  case ARM::EORrsi:
7312  case ARM::BICrsi:
7313  case ARM::SUBrsi:
7314  case ARM::ADDrsi: {
7315    unsigned newOpc;
7316    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7317    if (SOpc == ARM_AM::rrx) return false;
7318    switch (Inst.getOpcode()) {
7319    default: llvm_unreachable("unexpected opcode!");
7320    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7321    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7322    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7323    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7324    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7325    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7326    }
7327    // If the shift is by zero, use the non-shifted instruction definition.
7328    // The exception is right shifts, where an encoded amount of 0 means 32.
7329    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0 &&
7330        !(SOpc == ARM_AM::lsr || SOpc == ARM_AM::asr)) {
7331      MCInst TmpInst;
7332      TmpInst.setOpcode(newOpc);
7333      TmpInst.addOperand(Inst.getOperand(0));
7334      TmpInst.addOperand(Inst.getOperand(1));
7335      TmpInst.addOperand(Inst.getOperand(2));
7336      TmpInst.addOperand(Inst.getOperand(4));
7337      TmpInst.addOperand(Inst.getOperand(5));
7338      TmpInst.addOperand(Inst.getOperand(6));
7339      Inst = TmpInst;
7340      return true;
7341    }
7342    return false;
7343  }
7344  case ARM::ITasm:
7345  case ARM::t2IT: {
7346    // In the encoding, the mask bits for all but the first condition are
7347    // relative to the low bit of the condition code: a mask bit equal to
7348    // that bit means 't'. We always use 1 to mean 't', so XOR-toggle the
7349    // bits if the low bit of the condition code is zero.
7350    MCOperand &MO = Inst.getOperand(1);
7351    unsigned Mask = MO.getImm();
7352    unsigned OrigMask = Mask;
7353    unsigned TZ = CountTrailingZeros_32(Mask);
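        // The lowest set bit of the mask marks the end of the block; only the
        // bits above it encode 't'/'e' and may need toggling.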
7354    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7355      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7356      for (unsigned i = 3; i != TZ; --i)
7357        Mask ^= 1 << i;
7358    }
7359    MO.setImm(Mask);
7360
7361    // Set up the IT block state according to the IT instruction we just
7362    // matched.
7363    assert(!inITBlock() && "nested IT blocks?!");
7364    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7365    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7366    ITState.CurPosition = 0;
7367    ITState.FirstCond = true;
7368    break;
7369  }
7370  case ARM::t2LSLrr:
7371  case ARM::t2LSRrr:
7372  case ARM::t2ASRrr:
7373  case ARM::t2SBCrr:
7374  case ARM::t2RORrr:
7375  case ARM::t2BICrr:
7376  {
7377    // Assemblers should use the narrow encodings of these instructions when permissible.
7378    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7379         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7380        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
7381        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7382         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7383        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7384         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7385      unsigned NewOpc;
7386      switch (Inst.getOpcode()) {
7387        default: llvm_unreachable("unexpected opcode");
7388        case ARM::t2LSLrr: NewOpc = ARM::tLSLrr; break;
7389        case ARM::t2LSRrr: NewOpc = ARM::tLSRrr; break;
7390        case ARM::t2ASRrr: NewOpc = ARM::tASRrr; break;
7391        case ARM::t2SBCrr: NewOpc = ARM::tSBC; break;
7392        case ARM::t2RORrr: NewOpc = ARM::tROR; break;
7393        case ARM::t2BICrr: NewOpc = ARM::tBIC; break;
7394      }
7395      MCInst TmpInst;
7396      TmpInst.setOpcode(NewOpc);
7397      TmpInst.addOperand(Inst.getOperand(0));
7398      TmpInst.addOperand(Inst.getOperand(5));
7399      TmpInst.addOperand(Inst.getOperand(1));
7400      TmpInst.addOperand(Inst.getOperand(2));
7401      TmpInst.addOperand(Inst.getOperand(3));
7402      TmpInst.addOperand(Inst.getOperand(4));
7403      Inst = TmpInst;
7404      return true;
7405    }
7406    return false;
7407  }
7408  case ARM::t2ANDrr:
7409  case ARM::t2EORrr:
7410  case ARM::t2ADCrr:
7411  case ARM::t2ORRrr:
7412  {
7413    // Assemblers should use the narrow encodings of these instructions when permissible.
7414    // These instructions are special in that they are commutable, so shorter encodings
7415    // are available more often.
7416    if ((isARMLowRegister(Inst.getOperand(1).getReg()) &&
7417         isARMLowRegister(Inst.getOperand(2).getReg())) &&
7418        (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() ||
7419         Inst.getOperand(0).getReg() == Inst.getOperand(2).getReg()) &&
7420        ((!inITBlock() && Inst.getOperand(5).getReg() == ARM::CPSR) ||
7421         (inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR)) &&
7422        (!static_cast<ARMOperand*>(Operands[3])->isToken() ||
7423         !static_cast<ARMOperand*>(Operands[3])->getToken().equals_lower(".w"))) {
7424      unsigned NewOpc;
7425      switch (Inst.getOpcode()) {
7426        default: llvm_unreachable("unexpected opcode");
7427        case ARM::t2ADCrr: NewOpc = ARM::tADC; break;
7428        case ARM::t2ANDrr: NewOpc = ARM::tAND; break;
7429        case ARM::t2EORrr: NewOpc = ARM::tEOR; break;
7430        case ARM::t2ORRrr: NewOpc = ARM::tORR; break;
7431      }
7432      MCInst TmpInst;
7433      TmpInst.setOpcode(NewOpc);
7434      TmpInst.addOperand(Inst.getOperand(0));
7435      TmpInst.addOperand(Inst.getOperand(5));
7436      if (Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg()) {
7437        TmpInst.addOperand(Inst.getOperand(1));
7438        TmpInst.addOperand(Inst.getOperand(2));
7439      } else {
7440        TmpInst.addOperand(Inst.getOperand(2));
7441        TmpInst.addOperand(Inst.getOperand(1));
7442      }
7443      TmpInst.addOperand(Inst.getOperand(3));
7444      TmpInst.addOperand(Inst.getOperand(4));
7445      Inst = TmpInst;
7446      return true;
7447    }
7448    return false;
7449  }
7450  }
7451  return false;
7452}
7453
7454unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7455  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7456  // suffix depending on whether they're in an IT block or not.
7457  unsigned Opc = Inst.getOpcode();
7458  const MCInstrDesc &MCID = getInstDesc(Opc);
7459  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7460    assert(MCID.hasOptionalDef() &&
7461           "optionally flag setting instruction missing optional def operand");
7462    assert(MCID.NumOperands == Inst.getNumOperands() &&
7463           "operand count mismatch!");
7464    // Find the optional-def operand (cc_out).
7465    unsigned OpNo;
7466    for (OpNo = 0;
7467         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7468         ++OpNo)
7469      ;
7470    // If we're parsing Thumb1, reject it completely.
7471    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7472      return Match_MnemonicFail;
7473    // If we're parsing Thumb2, which form is legal depends on whether we're
7474    // in an IT block.
7475    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7476        !inITBlock())
7477      return Match_RequiresITBlock;
7478    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7479        inITBlock())
7480      return Match_RequiresNotITBlock;
7481  }
7482  // Some Thumb1 encodings that support high registers only allow both
7483  // registers to be from r0-r7 when in Thumb2.
7484  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7485           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7486           isARMLowRegister(Inst.getOperand(2).getReg()))
7487    return Match_RequiresThumb2;
7488  // Others only require ARMv6 or later.
7489  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7490           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7491           isARMLowRegister(Inst.getOperand(1).getReg()))
7492    return Match_RequiresV6;
7493  return Match_Success;
7494}
7495
7496static const char *getSubtargetFeatureName(unsigned Val);
7497bool ARMAsmParser::
7498MatchAndEmitInstruction(SMLoc IDLoc, unsigned &Opcode,
7499                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7500                        MCStreamer &Out, unsigned &ErrorInfo,
7501                        bool MatchingInlineAsm) {
7502  MCInst Inst;
7503  unsigned MatchResult;
7504
7505  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo,
7506                                     MatchingInlineAsm);
7507  switch (MatchResult) {
7508  default: break;
7509  case Match_Success:
7510    // Context sensitive operand constraints aren't handled by the matcher,
7511    // so check them here.
7512    if (validateInstruction(Inst, Operands)) {
7513      // Still progress the IT block, otherwise one wrong condition causes
7514      // nasty cascading errors.
7515      forwardITPosition();
7516      return true;
7517    }
7518
7519    // Some instructions need post-processing to, for example, tweak which
7520    // encoding is selected. Loop on it while changes happen so the
7521    // individual transformations can chain off each other. E.g.,
7522    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
7523    while (processInstruction(Inst, Operands))
7524      ;
7525
7526    // Only move forward at the very end so that everything in validate
7527    // and process gets a consistent answer about whether we're in an IT
7528    // block.
7529    forwardITPosition();
7530
7531    // ITasm is an ARM mode pseudo-instruction that just sets the IT block
7532    // state and doesn't actually encode to an instruction.
7533    if (Inst.getOpcode() == ARM::ITasm)
7534      return false;
7535
7536    Inst.setLoc(IDLoc);
7537    Out.EmitInstruction(Inst);
7538    return false;
7539  case Match_MissingFeature: {
7540    assert(ErrorInfo && "Unknown missing feature!");
7541    // Special case the error message for the very common case where only
7542    // a single subtarget feature is missing (Thumb vs. ARM, e.g.).
7543    std::string Msg = "instruction requires:";
7544    unsigned Mask = 1;
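        // Walk the feature bits and name each missing feature in the message.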
7545    for (unsigned i = 0; i < (sizeof(ErrorInfo)*8-1); ++i) {
7546      if (ErrorInfo & Mask) {
7547        Msg += " ";
7548        Msg += getSubtargetFeatureName(ErrorInfo & Mask);
7549      }
7550      Mask <<= 1;
7551    }
7552    return Error(IDLoc, Msg);
7553  }
7554  case Match_InvalidOperand: {
7555    SMLoc ErrorLoc = IDLoc;
7556    if (ErrorInfo != ~0U) {
7557      if (ErrorInfo >= Operands.size())
7558        return Error(IDLoc, "too few operands for instruction");
7559
7560      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7561      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7562    }
7563
7564    return Error(ErrorLoc, "invalid operand for instruction");
7565  }
7566  case Match_MnemonicFail:
7567    return Error(IDLoc, "invalid instruction",
7568                 ((ARMOperand*)Operands[0])->getLocRange());
7569  case Match_RequiresNotITBlock:
7570    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7571  case Match_RequiresITBlock:
7572    return Error(IDLoc, "instruction only valid inside IT block");
7573  case Match_RequiresV6:
7574    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7575  case Match_RequiresThumb2:
7576    return Error(IDLoc, "instruction variant requires Thumb2");
7577  case Match_ImmRange0_15: {
7578    SMLoc ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7579    if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7580    return Error(ErrorLoc, "immediate operand must be in the range [0,15]");
7581  }
7582  }
7583
7584  llvm_unreachable("Implement any new match types added!");
7585}
7586
7587/// parseDirective parses the ARM-specific directives.
7588bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7589  StringRef IDVal = DirectiveID.getIdentifier();
7590  if (IDVal == ".word")
7591    return parseDirectiveWord(4, DirectiveID.getLoc());
7592  else if (IDVal == ".thumb")
7593    return parseDirectiveThumb(DirectiveID.getLoc());
7594  else if (IDVal == ".arm")
7595    return parseDirectiveARM(DirectiveID.getLoc());
7596  else if (IDVal == ".thumb_func")
7597    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7598  else if (IDVal == ".code")
7599    return parseDirectiveCode(DirectiveID.getLoc());
7600  else if (IDVal == ".syntax")
7601    return parseDirectiveSyntax(DirectiveID.getLoc());
7602  else if (IDVal == ".unreq")
7603    return parseDirectiveUnreq(DirectiveID.getLoc());
7604  else if (IDVal == ".arch")
7605    return parseDirectiveArch(DirectiveID.getLoc());
7606  else if (IDVal == ".eabi_attribute")
7607    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7608  return true;
7609}
7610
7611/// parseDirectiveWord
7612///  ::= .word [ expression (, expression)* ]
7613bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7614  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7615    for (;;) {
7616      const MCExpr *Value;
7617      if (getParser().ParseExpression(Value))
7618        return true;
7619
7620      getParser().getStreamer().EmitValue(Value, Size);
7621
7622      if (getLexer().is(AsmToken::EndOfStatement))
7623        break;
7624
7625      // FIXME: Improve diagnostic.
7626      if (getLexer().isNot(AsmToken::Comma))
7627        return Error(L, "unexpected token in directive");
7628      Parser.Lex();
7629    }
7630  }
7631
7632  Parser.Lex();
7633  return false;
7634}
7635
7636/// parseDirectiveThumb
7637///  ::= .thumb
7638bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7639  if (getLexer().isNot(AsmToken::EndOfStatement))
7640    return Error(L, "unexpected token in directive");
7641  Parser.Lex();
7642
7643  if (!isThumb())
7644    SwitchMode();
7645  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7646  return false;
7647}
7648
7649/// parseDirectiveARM
7650///  ::= .arm
7651bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7652  if (getLexer().isNot(AsmToken::EndOfStatement))
7653    return Error(L, "unexpected token in directive");
7654  Parser.Lex();
7655
7656  if (isThumb())
7657    SwitchMode();
7658  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7659  return false;
7660}
7661
7662/// parseDirectiveThumbFunc
7663///  ::= .thumb_func symbol_name
7664bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7665  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7666  bool isMachO = MAI.hasSubsectionsViaSymbols();
7667  StringRef Name;
7668  bool needFuncName = true;
7669
7670  // Darwin asm has an (optional) function name after the .thumb_func
7671  // directive; ELF doesn't.
7672  if (isMachO) {
7673    const AsmToken &Tok = Parser.getTok();
7674    if (Tok.isNot(AsmToken::EndOfStatement)) {
7675      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7676        return Error(L, "unexpected token in .thumb_func directive");
7677      Name = Tok.getIdentifier();
7678      Parser.Lex(); // Consume the identifier token.
7679      needFuncName = false;
7680    }
7681  }
7682
7683  if (getLexer().isNot(AsmToken::EndOfStatement))
7684    return Error(L, "unexpected token in directive");
7685
7686  // Eat the end of statement and any blank lines that follow.
7687  while (getLexer().is(AsmToken::EndOfStatement))
7688    Parser.Lex();
7689
7690  // FIXME: assuming function name will be the line following .thumb_func
7691  // We really should be checking the next symbol definition even if there's
7692  // stuff in between.
7693  if (needFuncName) {
7694    Name = Parser.getTok().getIdentifier();
7695  }
7696
7697  // Mark symbol as a thumb symbol.
7698  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7699  getParser().getStreamer().EmitThumbFunc(Func);
7700  return false;
7701}
7702
7703/// parseDirectiveSyntax
7704///  ::= .syntax unified | divided
7705bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7706  const AsmToken &Tok = Parser.getTok();
7707  if (Tok.isNot(AsmToken::Identifier))
7708    return Error(L, "unexpected token in .syntax directive");
7709  StringRef Mode = Tok.getString();
7710  if (Mode == "unified" || Mode == "UNIFIED")
7711    Parser.Lex();
7712  else if (Mode == "divided" || Mode == "DIVIDED")
7713    return Error(L, "'.syntax divided' arm assembly not supported");
7714  else
7715    return Error(L, "unrecognized syntax mode in .syntax directive");
7716
7717  if (getLexer().isNot(AsmToken::EndOfStatement))
7718    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7719  Parser.Lex();
7720
7721  // TODO tell the MC streamer the mode
7722  // getParser().getStreamer().Emit???();
7723  return false;
7724}
7725
7726/// parseDirectiveCode
7727///  ::= .code 16 | 32
7728bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7729  const AsmToken &Tok = Parser.getTok();
7730  if (Tok.isNot(AsmToken::Integer))
7731    return Error(L, "unexpected token in .code directive");
7732  int64_t Val = Parser.getTok().getIntVal();
7733  if (Val == 16)
7734    Parser.Lex();
7735  else if (Val == 32)
7736    Parser.Lex();
7737  else
7738    return Error(L, "invalid operand to .code directive");
7739
7740  if (getLexer().isNot(AsmToken::EndOfStatement))
7741    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7742  Parser.Lex();
7743
7744  if (Val == 16) {
7745    if (!isThumb())
7746      SwitchMode();
7747    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7748  } else {
7749    if (isThumb())
7750      SwitchMode();
7751    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7752  }
7753
7754  return false;
7755}
7756
7757/// parseDirectiveReq
7758///  ::= name .req registername
7759bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7760  Parser.Lex(); // Eat the '.req' token.
7761  unsigned Reg;
7762  SMLoc SRegLoc, ERegLoc;
7763  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7764    Parser.EatToEndOfStatement();
7765    return Error(SRegLoc, "register name expected");
7766  }
7767
7768  // Shouldn't be anything else.
7769  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7770    Parser.EatToEndOfStatement();
7771    return Error(Parser.getTok().getLoc(),
7772                 "unexpected input in .req directive.");
7773  }
7774
7775  Parser.Lex(); // Consume the EndOfStatement
7776
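      // Record the alias; diagnose any attempt to redefine the name to a
      // different register.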
7777  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7778    return Error(SRegLoc, "redefinition of '" + Name +
7779                          "' does not match original.");
7780
7781  return false;
7782}
7783
7784/// parseDirectiveUnreq
7785///  ::= .unreq registername
7786bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7787  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7788    Parser.EatToEndOfStatement();
7789    return Error(L, "unexpected input in .unreq directive.");
7790  }
7791  RegisterReqs.erase(Parser.getTok().getIdentifier());
7792  Parser.Lex(); // Eat the identifier.
7793  return false;
7794}
7795
7796/// parseDirectiveArch
7797///  ::= .arch token
7798bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
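      // FIXME: Not currently supported.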
7799  return true;
7800}
7801
7802/// parseDirectiveEabiAttr
7803///  ::= .eabi_attribute int, int
7804bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
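      // FIXME: EABI attribute directives are not currently supported.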
7805  return true;
7806}
7807
7808/// Force static initialization.
7809extern "C" void LLVMInitializeARMAsmParser() {
7810  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7811  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7812}
7813
7814#define GET_REGISTER_MATCHER
7815#define GET_SUBTARGET_FEATURE_NAME
7816#define GET_MATCHER_IMPLEMENTATION
7817#include "ARMGenAsmMatcher.inc"
7818
7819// Define this matcher function after the auto-generated include so we
7820// have the match class enum definitions.
7821unsigned ARMAsmParser::validateTargetOperandClass(MCParsedAsmOperand *AsmOp,
7822                                                  unsigned Kind) {
7823  ARMOperand *Op = static_cast<ARMOperand*>(AsmOp);
7824  // If the kind is a token for a literal immediate, check if our asm
7825  // operand matches. This is for InstAliases which have a fixed-value
7826  // immediate in the syntax.
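      // (MCK__35_0 is the TableGen-mangled class name for the literal
      // token "#0".)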
7827  if (Kind == MCK__35_0 && Op->isImm()) {
7828    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
7829    if (!CE)
7830      return Match_InvalidOperand;
7831    if (CE->getValue() == 0)
7832      return Match_Success;
7833  }
7834  return Match_InvalidOperand;
7835}
7836