ARMAsmParser.cpp revision 0b4c6738868e11ba06047a406f79489cb1db8c5a
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
// Kind of NEON vector-lane suffix parsed on an operand: no lane suffix,
// an all-lanes "[]" suffix, or a single indexed lane "[n]" (presumably;
// confirm against parseVectorLane's callers).
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
/// ARMAsmParser - MCTargetAsmParser implementation for ARM and Thumb
/// assembly. It parses registers, operands, and directives, tracks the
/// state of any active Thumb IT block, and hands parsed instructions to
/// the tablegen-generated matcher included below.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // Map of register aliases created via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // Parser-side model of the current Thumb IT block, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // True while we are still inside an active IT block.
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostic helpers forwarding to the generic assembly parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Low-level operand parsing helpers.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Handlers for the assembler directives this target understands
  // (.word, .thumb, .arm, .thumb_func, .code, .syntax, .req, .unreq,
  // .arch, .eabi_attribute).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);

  // Split a full mnemonic (e.g. "addseq") into the base mnemonic and its
  // predication/carry-set/IMod/IT-mask suffixes.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries, driven by STI's feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb mode and recompute the matcher's
  // available-feature bits accordingly.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced by the generated matcher.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match checks and fixups applied to a matched MCInst.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match results, numbered after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union below: exactly one union member is
  // active for each operand, selected by this kind.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  // Used by the k_*RegisterList kinds. Kept outside the union because
  // SmallVector has constructors/destructor and cannot be a union member.
  SmallVector<unsigned, 8> Registers;

  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    // Token text as a (pointer, length) pair; the character storage is
    // presumably owned elsewhere (e.g. the source buffer) — not freed here.
    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
      unsigned LaneIndex;
      bool isDoubleSpaced;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;       // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;        // shift for OffsetReg.
      unsigned Alignment;       // 0 = no alignment specified
                                // n = alignment in bytes (2, 4, 8, 16, or 32)
      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  // Private: operands are created through the Create* factories, which
  // set the kind and then fill in the matching union member.
  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy constructor. Because the payload lives in a union, only the
  // member that is active for o.Kind may legally be read, so we dispatch
  // on the kind and copy exactly that member (plus the out-of-union
  // Registers vector for the register-list kinds).
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }
479
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Kind-checked accessors: each asserts that the operand actually holds
  // the requested payload before reading the corresponding union member.

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  // Returns the encoded 8-bit FP immediate representation.
  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }
540
541  bool isCoprocNum() const { return Kind == k_CoprocNum; }
542  bool isCoprocReg() const { return Kind == k_CoprocReg; }
543  bool isCoprocOption() const { return Kind == k_CoprocOption; }
544  bool isCondCode() const { return Kind == k_CondCode; }
545  bool isCCOut() const { return Kind == k_CCOut; }
546  bool isITMask() const { return Kind == k_ITCondMask; }
547  bool isITCondCode() const { return Kind == k_CondCode; }
548  bool isImm() const { return Kind == k_Immediate; }
549  bool isFPImm() const { return Kind == k_FPImmediate; }
550  bool isFBits16() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 0 && Value <= 16;
556  }
557  bool isFBits32() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return Value >= 1 && Value <= 32;
563  }
564  bool isImm8s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
570  }
571  bool isImm0_1020s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
577  }
578  bool isImm0_508s4() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
584  }
585  bool isImm0_255() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 256;
591  }
592  bool isImm0_1() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 2;
598  }
599  bool isImm0_3() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 4;
605  }
606  bool isImm0_7() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 8;
612  }
613  bool isImm0_15() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 16;
619  }
620  bool isImm0_31() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 32;
626  }
627  bool isImm0_63() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value >= 0 && Value < 64;
633  }
634  bool isImm8() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 8;
640  }
641  bool isImm16() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 16;
647  }
648  bool isImm32() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value == 32;
654  }
655  bool isShrImm8() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 8;
661  }
662  bool isShrImm16() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 16;
668  }
669  bool isShrImm32() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 32;
675  }
676  bool isShrImm64() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value <= 64;
682  }
683  bool isImm1_7() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 8;
689  }
690  bool isImm1_15() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 16;
696  }
697  bool isImm1_31() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 32;
703  }
704  bool isImm1_16() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 17;
710  }
711  bool isImm1_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value > 0 && Value < 33;
717  }
718  bool isImm0_32() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 33;
724  }
725  bool isImm0_65535() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    if (!CE) return false;
729    int64_t Value = CE->getValue();
730    return Value >= 0 && Value < 65536;
731  }
732  bool isImm0_65535Expr() const {
733    if (!isImm()) return false;
734    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
735    // If it's not a constant expression, it'll generate a fixup and be
736    // handled later.
737    if (!CE) return true;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value < 65536;
740  }
741  bool isImm24bit() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value >= 0 && Value <= 0xffffff;
747  }
748  bool isImmThumbSR() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value > 0 && Value < 33;
754  }
755  bool isPKHLSLImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value >= 0 && Value < 32;
761  }
762  bool isPKHASRImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return Value > 0 && Value <= 32;
768  }
769  bool isARMSOImm() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(Value) != -1;
775  }
776  bool isARMSOImmNot() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(~Value) != -1;
782  }
783  bool isARMSOImmNeg() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getSOImmVal(-Value) != -1;
789  }
790  bool isT2SOImm() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(Value) != -1;
796  }
797  bool isT2SOImmNot() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(~Value) != -1;
803  }
804  bool isT2SOImmNeg() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return ARM_AM::getT2SOImmVal(-Value) != -1;
810  }
811  bool isSetEndImm() const {
812    if (!isImm()) return false;
813    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814    if (!CE) return false;
815    int64_t Value = CE->getValue();
816    return Value == 1 || Value == 0;
817  }
818  bool isReg() const { return Kind == k_Register; }
819  bool isRegList() const { return Kind == k_RegisterList; }
820  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
821  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
822  bool isToken() const { return Kind == k_Token; }
823  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
824  bool isMemory() const { return Kind == k_Memory; }
825  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
826  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
827  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
828  bool isRotImm() const { return Kind == k_RotateImmediate; }
829  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
830  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
831  bool isPostIdxReg() const {
832    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
833  }
834  bool isMemNoOffset(bool alignOK = false) const {
835    if (!isMemory())
836      return false;
837    // No offset of any kind.
838    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
839     (alignOK || Memory.Alignment == 0);
840  }
841  bool isMemPCRelImm12() const {
842    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
843      return false;
844    // Base register must be PC.
845    if (Memory.BaseRegNum != ARM::PC)
846      return false;
847    // Immediate offset in range [-4095, 4095].
848    if (!Memory.OffsetImm) return true;
849    int64_t Val = Memory.OffsetImm->getValue();
850    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
851  }
852  bool isAlignedMemory() const {
853    return isMemNoOffset(true);
854  }
855  bool isAddrMode2() const {
856    if (!isMemory() || Memory.Alignment != 0) return false;
857    // Check for register offset.
858    if (Memory.OffsetRegNum) return true;
859    // Immediate offset in range [-4095, 4095].
860    if (!Memory.OffsetImm) return true;
861    int64_t Val = Memory.OffsetImm->getValue();
862    return Val > -4096 && Val < 4096;
863  }
864  bool isAM2OffsetImm() const {
865    if (!isImm()) return false;
866    // Immediate offset in range [-4095, 4095].
867    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
868    if (!CE) return false;
869    int64_t Val = CE->getValue();
870    return Val > -4096 && Val < 4096;
871  }
  // ARM addressing mode 3: [Rn, +/-Rm] or [Rn, #+/-imm8]; shifts are illegal.
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255]. A missing offset counts as 0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  // Post-indexed addressing-mode-3 offset: either an unshifted register or an
  // immediate in [-255, 255].
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  // VFP addressing mode 5: [Rn, #+/-imm8*4] — no register offset allowed.
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4, or the
    // INT32_MIN sentinel for #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  // TBB (table branch byte) operand: [Rn, Rm] with no shift or negation.
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  // TBH (table branch halfword) operand: [Rn, Rm, lsl #1] exactly.
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
928  bool isMemRegOffset() const {
929    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
930      return false;
931    return true;
932  }
  // Thumb2 register-offset memory operand: [Rn, Rm] or [Rn, Rm, lsl #0-3],
  // positive offset only.
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
944  bool isMemThumbRR() const {
945    // Thumb reg+reg addressing is simple. Just two registers, a base and
946    // an offset. No shifts, negations or any other complicating factors.
947    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
948        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
949      return false;
950    return isARMLowRegister(Memory.BaseRegNum) &&
951      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
952  }
  // Thumb [Rn, #imm] with low base register and word-scaled offset.
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  // Thumb [Rn, #imm] with low base register and halfword-scaled offset.
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  // Thumb [Rn, #imm] with low base register and byte offset.
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  // Thumb SP-relative operand: [sp, #imm] with word-scaled offset.
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  // [Rn, #+/-imm8*4] memory operand (e.g. LDRD/STRD in Thumb2).
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  // [Rn, #imm8*4] memory operand with unsigned, word-scaled offset.
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  // [Rn, #+/-imm8] memory operand; PC is not a legal base register here.
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255], with INT32_MIN standing in
    // for #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
1020  bool isMemPosImm8Offset() const {
1021    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1022      return false;
1023    // Immediate offset in range [0, 255].
1024    if (!Memory.OffsetImm) return true;
1025    int64_t Val = Memory.OffsetImm->getValue();
1026    return Val >= 0 && Val < 256;
1027  }
  // [Rn, #-imm8] memory operand: the offset must be strictly negative.
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1]. INT32_MIN is the #-0 sentinel.
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  // [Rn, #imm12] memory operand with an unsigned 12-bit offset.
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  // [Rn, #+/-imm12] memory operand, or a label reference.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095], with INT32_MIN for #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  // Post-indexed immediate in [-255, 255]; INT32_MIN encodes #-0.
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  // Post-indexed immediate, a multiple of 4 in [-1020, 1020];
  // INT32_MIN encodes #-0.
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }
1075
  // Kind predicates for MSR mask and CPS interrupt-flag operands.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1078
  // NEON operands.
  // Register-list predicates: spacing (single vs. double) plus element count.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // A Q-register pair is represented as two double-spaced D registers.
  bool isVecListTwoQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }
1110
  // "All lanes" list predicates, e.g. the {d0[], d1[]} syntax used by VLD
  // duplicating forms.
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListTwoQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }
1131
  // Lane-indexed list predicates, e.g. {d0[2], d1[2]}. The maximum legal
  // lane index depends on the element size: 7 for bytes, 3 for halfwords,
  // 1 for words.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }
1177
  // Scalar lane-index predicates. A D register holds 8 bytes, 4 halfwords,
  // or 2 words, which bounds the legal index for each element size.
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }
1190
1191  bool isNEONi8splat() const {
1192    if (!isImm()) return false;
1193    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1194    // Must be a constant.
1195    if (!CE) return false;
1196    int64_t Value = CE->getValue();
1197    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1198    // value.
1199    return Value >= 0 && Value < 256;
1200  }
1201
  // Immediate usable as a 16-bit NEON splat.
  bool isNEONi16splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  // Immediate usable as a 32-bit NEON splat.
  bool isNEONi32splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    // NOTE(review): the range checks below also admit values with bits in
    // two adjacent bytes (e.g. 0x01ff); presumably the encoder or a later
    // check canonicalizes these — confirm against the instruction encoder.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }
1224
  // Immediate usable with 32-bit VMOV/VMVN, which additionally accept the
  // 00XF / 0XFF "ones-extended" forms.
  bool isNEONi32vmov() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
  // As above, but the assembly carries the bitwise complement of the
  // encodable value.
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    // NOTE(review): ~ complements all 64 bits, so an immediate with bit 31
    // set yields a negative Value here and fails every range test below —
    // confirm whether a 32-bit mask (~CE->getValue() & 0xffffffff) is
    // intended.
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
1255
1256  bool isNEONi64splat() const {
1257    if (!isImm()) return false;
1258    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1259    // Must be a constant.
1260    if (!CE) return false;
1261    uint64_t Value = CE->getValue();
1262    // i64 value with each byte being either 0 or 0xff.
1263    for (unsigned i = 0; i < 8; ++i)
1264      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1265    return true;
1266  }
1267
1268  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1269    // Add as immediates when possible.  Null MCExpr = 0.
1270    if (Expr == 0)
1271      Inst.addOperand(MCOperand::CreateImm(0));
1272    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1273      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1274    else
1275      Inst.addOperand(MCOperand::CreateExpr(Expr));
1276  }
1277
  // Condition code plus the implicit CPSR use (0 when the code is AL).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  // Condition mask for an IT instruction.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  // Condition code for an IT instruction (no CPSR operand).
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  // Optional flag-setting output register (CPSR or 0).
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1314
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Register shifted by register: Rm, shift Rs — three MC operands
  // (source reg, shift reg, packed shift opcode+amount).
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by immediate: Rm, shift #imm — two MC operands.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  // Shifter immediate packed as (isASR << 5) | amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
1344
  // A register list becomes one register MC operand per list element.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  // D- and S-register lists use the same expansion as core-register lists.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
1360
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    // The double shift of the all-ones word isolates bits [lsb, lsb+width)
    // before the complement.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }
1377
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  // Fractional bits encode as (16 - value). The dyn_cast is unchecked: the
  // matching predicate has already guaranteed a constant immediate.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
  }

  // Fractional bits encode as (32 - value).
  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }
1399
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }
1423
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  // Thumb shift-right amount: a shift of 32 is encoded as 0.
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }
1448
1449  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1450    assert(N == 1 && "Invalid number of operands!");
1451    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1452    // the instruction as well.
1453    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1454    int Val = CE->getValue();
1455    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1456  }
1457
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }
1489
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  // A no-offset memory operand contributes only its base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }
1499
1500  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1501    assert(N == 1 && "Invalid number of operands!");
1502    int32_t Imm = Memory.OffsetImm->getValue();
1503    // FIXME: Handle #-0
1504    if (Imm == INT32_MIN) Imm = 0;
1505    Inst.addOperand(MCOperand::CreateImm(Imm));
1506  }
1507
  // Aligned memory operand: base register plus alignment (0 = unspecified).
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }
1513
  // Addressing mode 2: base reg, offset reg (0 if none), and a packed
  // AM2 opcode immediate (add/sub, offset or shift amount, shift type).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0: the INT32_MIN sentinel keeps the 'sub' flag
      // but encodes a zero offset.
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1533
  // Post-indexed AM2 immediate offset: a zero register slot plus the packed
  // AM2 opcode immediate.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: keep the 'sub' flag, encode a zero offset.
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1547
  // Addressing mode 3: base reg, offset reg (0 if none), packed AM3 opcode.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateReg(0));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0: keep the 'sub' flag, encode a zero offset.
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1576
  // Post-indexed AM3 offset: either a register (with add/sub flag) or an
  // immediate, packed into the AM3 opcode.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset. (isAM3Offset() has already guaranteed the immediate
    // is an MCConstantExpr, so the static_cast is safe.)
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: keep the 'sub' flag, encode a zero offset.
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1598
  // VFP addressing mode 5: base reg plus packed AM5 opcode (add/sub and
  // word-scaled offset).
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: keep the 'sub' flag, encode a zero offset.
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1620
1621  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1622    assert(N == 2 && "Invalid number of operands!");
1623    // If we have an immediate that's not a constant, treat it as a label
1624    // reference needing a fixup. If it is a constant, it's something else
1625    // and we reject it.
1626    if (isImm()) {
1627      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1628      Inst.addOperand(MCOperand::CreateImm(0));
1629      return;
1630    }
1631
1632    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1633    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1634    Inst.addOperand(MCOperand::CreateImm(Val));
1635  }
1636
1637  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1638    assert(N == 2 && "Invalid number of operands!");
1639    // The lower two bits are always zero and as such are not encoded.
1640    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1641    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1642    Inst.addOperand(MCOperand::CreateImm(Val));
1643  }
1644
1645  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1646    assert(N == 2 && "Invalid number of operands!");
1647    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1648    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1649    Inst.addOperand(MCOperand::CreateImm(Val));
1650  }
1651
  // Positive-immediate variant; emission is shared with the generic
  // imm8 form (the sign distinction matters only for operand matching).
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  // Negative-immediate variant; likewise delegates to the generic form.
  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
1659
1660  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1661    assert(N == 2 && "Invalid number of operands!");
1662    // If this is an immediate, it's a label reference.
1663    if (isImm()) {
1664      addExpr(Inst, getImm());
1665      Inst.addOperand(MCOperand::CreateImm(0));
1666      return;
1667    }
1668
1669    // Otherwise, it's a normal memory reg+offset.
1670    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1671    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1672    Inst.addOperand(MCOperand::CreateImm(Val));
1673  }
1674
1675  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1676    assert(N == 2 && "Invalid number of operands!");
1677    // If this is an immediate, it's a label reference.
1678    if (isImm()) {
1679      addExpr(Inst, getImm());
1680      Inst.addOperand(MCOperand::CreateImm(0));
1681      return;
1682    }
1683
1684    // Otherwise, it's a normal memory reg+offset.
1685    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1686    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1687    Inst.addOperand(MCOperand::CreateImm(Val));
1688  }
1689
1690  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1691    assert(N == 2 && "Invalid number of operands!");
1692    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1693    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1694  }
1695
1696  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1697    assert(N == 2 && "Invalid number of operands!");
1698    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1699    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1700  }
1701
1702  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1703    assert(N == 3 && "Invalid number of operands!");
1704    unsigned Val =
1705      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1706                        Memory.ShiftImm, Memory.ShiftType);
1707    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1708    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1709    Inst.addOperand(MCOperand::CreateImm(Val));
1710  }
1711
1712  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1713    assert(N == 3 && "Invalid number of operands!");
1714    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1715    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1716    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1717  }
1718
1719  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1720    assert(N == 2 && "Invalid number of operands!");
1721    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1722    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1723  }
1724
1725  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1726    assert(N == 2 && "Invalid number of operands!");
1727    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1728    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1729    Inst.addOperand(MCOperand::CreateImm(Val));
1730  }
1731
1732  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1733    assert(N == 2 && "Invalid number of operands!");
1734    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1735    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1736    Inst.addOperand(MCOperand::CreateImm(Val));
1737  }
1738
1739  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1740    assert(N == 2 && "Invalid number of operands!");
1741    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1742    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1743    Inst.addOperand(MCOperand::CreateImm(Val));
1744  }
1745
1746  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1747    assert(N == 2 && "Invalid number of operands!");
1748    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1749    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1750    Inst.addOperand(MCOperand::CreateImm(Val));
1751  }
1752
  // Emit a single packed operand for an 8-bit post-indexed immediate:
  // bits [7:0] hold the magnitude, bit 8 is the add(1)/sub(0) direction.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    // Direction is decided before the #-0 normalization below, so
    // INT32_MIN (the marker for "-0") yields subtract with magnitude 0.
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1763
  // Like addPostIdxImm8Operands, but the magnitude is scaled down by 4
  // before packing (the offset is a multiple of 4).
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    // Direction first, then normalize the #-0 marker (INT32_MIN).
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1775
1776  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1777    assert(N == 2 && "Invalid number of operands!");
1778    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1779    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1780  }
1781
1782  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1783    assert(N == 2 && "Invalid number of operands!");
1784    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1785    // The sign, shift type, and shift amount are encoded in a single operand
1786    // using the AM2 encoding helpers.
1787    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1788    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1789                                     PostIdxReg.ShiftTy);
1790    Inst.addOperand(MCOperand::CreateImm(Imm));
1791  }
1792
1793  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1794    assert(N == 1 && "Invalid number of operands!");
1795    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1796  }
1797
1798  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1799    assert(N == 1 && "Invalid number of operands!");
1800    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1801  }
1802
1803  void addVecListOperands(MCInst &Inst, unsigned N) const {
1804    assert(N == 1 && "Invalid number of operands!");
1805    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1806  }
1807
1808  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1809    assert(N == 2 && "Invalid number of operands!");
1810    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1811    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1812  }
1813
1814  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1815    assert(N == 1 && "Invalid number of operands!");
1816    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1817  }
1818
1819  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1820    assert(N == 1 && "Invalid number of operands!");
1821    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1822  }
1823
1824  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1825    assert(N == 1 && "Invalid number of operands!");
1826    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1827  }
1828
1829  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1830    assert(N == 1 && "Invalid number of operands!");
1831    // The immediate encodes the type of constant as well as the value.
1832    // Mask in that this is an i8 splat.
1833    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1834    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1835  }
1836
1837  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1838    assert(N == 1 && "Invalid number of operands!");
1839    // The immediate encodes the type of constant as well as the value.
1840    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1841    unsigned Value = CE->getValue();
1842    if (Value >= 256)
1843      Value = (Value >> 8) | 0xa00;
1844    else
1845      Value |= 0x800;
1846    Inst.addOperand(MCOperand::CreateImm(Value));
1847  }
1848
1849  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1850    assert(N == 1 && "Invalid number of operands!");
1851    // The immediate encodes the type of constant as well as the value.
1852    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1853    unsigned Value = CE->getValue();
1854    if (Value >= 256 && Value <= 0xff00)
1855      Value = (Value >> 8) | 0x200;
1856    else if (Value > 0xffff && Value <= 0xff0000)
1857      Value = (Value >> 16) | 0x400;
1858    else if (Value > 0xffffff)
1859      Value = (Value >> 24) | 0x600;
1860    Inst.addOperand(MCOperand::CreateImm(Value));
1861  }
1862
1863  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1864    assert(N == 1 && "Invalid number of operands!");
1865    // The immediate encodes the type of constant as well as the value.
1866    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1867    unsigned Value = CE->getValue();
1868    if (Value >= 256 && Value <= 0xffff)
1869      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1870    else if (Value > 0xffff && Value <= 0xffffff)
1871      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1872    else if (Value > 0xffffff)
1873      Value = (Value >> 24) | 0x600;
1874    Inst.addOperand(MCOperand::CreateImm(Value));
1875  }
1876
1877  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1878    assert(N == 1 && "Invalid number of operands!");
1879    // The immediate encodes the type of constant as well as the value.
1880    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1881    unsigned Value = ~CE->getValue();
1882    if (Value >= 256 && Value <= 0xffff)
1883      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1884    else if (Value > 0xffff && Value <= 0xffffff)
1885      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1886    else if (Value > 0xffffff)
1887      Value = (Value >> 24) | 0x600;
1888    Inst.addOperand(MCOperand::CreateImm(Value));
1889  }
1890
1891  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1892    assert(N == 1 && "Invalid number of operands!");
1893    // The immediate encodes the type of constant as well as the value.
1894    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1895    uint64_t Value = CE->getValue();
1896    unsigned Imm = 0;
1897    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1898      Imm |= (Value & 1) << i;
1899    }
1900    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1901  }
1902
1903  virtual void print(raw_ostream &OS) const;
1904
1905  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1906    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1907    Op->ITMask.Mask = Mask;
1908    Op->StartLoc = S;
1909    Op->EndLoc = S;
1910    return Op;
1911  }
1912
1913  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1914    ARMOperand *Op = new ARMOperand(k_CondCode);
1915    Op->CC.Val = CC;
1916    Op->StartLoc = S;
1917    Op->EndLoc = S;
1918    return Op;
1919  }
1920
1921  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1922    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1923    Op->Cop.Val = CopVal;
1924    Op->StartLoc = S;
1925    Op->EndLoc = S;
1926    return Op;
1927  }
1928
1929  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1930    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1931    Op->Cop.Val = CopVal;
1932    Op->StartLoc = S;
1933    Op->EndLoc = S;
1934    return Op;
1935  }
1936
1937  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1938    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1939    Op->Cop.Val = Val;
1940    Op->StartLoc = S;
1941    Op->EndLoc = E;
1942    return Op;
1943  }
1944
1945  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1946    ARMOperand *Op = new ARMOperand(k_CCOut);
1947    Op->Reg.RegNum = RegNum;
1948    Op->StartLoc = S;
1949    Op->EndLoc = S;
1950    return Op;
1951  }
1952
1953  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1954    ARMOperand *Op = new ARMOperand(k_Token);
1955    Op->Tok.Data = Str.data();
1956    Op->Tok.Length = Str.size();
1957    Op->StartLoc = S;
1958    Op->EndLoc = S;
1959    return Op;
1960  }
1961
1962  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1963    ARMOperand *Op = new ARMOperand(k_Register);
1964    Op->Reg.RegNum = RegNum;
1965    Op->StartLoc = S;
1966    Op->EndLoc = E;
1967    return Op;
1968  }
1969
1970  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1971                                           unsigned SrcReg,
1972                                           unsigned ShiftReg,
1973                                           unsigned ShiftImm,
1974                                           SMLoc S, SMLoc E) {
1975    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1976    Op->RegShiftedReg.ShiftTy = ShTy;
1977    Op->RegShiftedReg.SrcReg = SrcReg;
1978    Op->RegShiftedReg.ShiftReg = ShiftReg;
1979    Op->RegShiftedReg.ShiftImm = ShiftImm;
1980    Op->StartLoc = S;
1981    Op->EndLoc = E;
1982    return Op;
1983  }
1984
1985  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1986                                            unsigned SrcReg,
1987                                            unsigned ShiftImm,
1988                                            SMLoc S, SMLoc E) {
1989    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1990    Op->RegShiftedImm.ShiftTy = ShTy;
1991    Op->RegShiftedImm.SrcReg = SrcReg;
1992    Op->RegShiftedImm.ShiftImm = ShiftImm;
1993    Op->StartLoc = S;
1994    Op->EndLoc = E;
1995    return Op;
1996  }
1997
1998  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1999                                   SMLoc S, SMLoc E) {
2000    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2001    Op->ShifterImm.isASR = isASR;
2002    Op->ShifterImm.Imm = Imm;
2003    Op->StartLoc = S;
2004    Op->EndLoc = E;
2005    return Op;
2006  }
2007
2008  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2009    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2010    Op->RotImm.Imm = Imm;
2011    Op->StartLoc = S;
2012    Op->EndLoc = E;
2013    return Op;
2014  }
2015
2016  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2017                                    SMLoc S, SMLoc E) {
2018    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2019    Op->Bitfield.LSB = LSB;
2020    Op->Bitfield.Width = Width;
2021    Op->StartLoc = S;
2022    Op->EndLoc = E;
2023    return Op;
2024  }
2025
2026  static ARMOperand *
2027  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2028                SMLoc StartLoc, SMLoc EndLoc) {
2029    KindTy Kind = k_RegisterList;
2030
2031    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2032      Kind = k_DPRRegisterList;
2033    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2034             contains(Regs.front().first))
2035      Kind = k_SPRRegisterList;
2036
2037    ARMOperand *Op = new ARMOperand(Kind);
2038    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2039           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2040      Op->Registers.push_back(I->first);
2041    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2042    Op->StartLoc = StartLoc;
2043    Op->EndLoc = EndLoc;
2044    return Op;
2045  }
2046
2047  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2048                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2049    ARMOperand *Op = new ARMOperand(k_VectorList);
2050    Op->VectorList.RegNum = RegNum;
2051    Op->VectorList.Count = Count;
2052    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2053    Op->StartLoc = S;
2054    Op->EndLoc = E;
2055    return Op;
2056  }
2057
2058  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2059                                              bool isDoubleSpaced,
2060                                              SMLoc S, SMLoc E) {
2061    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2062    Op->VectorList.RegNum = RegNum;
2063    Op->VectorList.Count = Count;
2064    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2065    Op->StartLoc = S;
2066    Op->EndLoc = E;
2067    return Op;
2068  }
2069
2070  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2071                                             unsigned Index,
2072                                             bool isDoubleSpaced,
2073                                             SMLoc S, SMLoc E) {
2074    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2075    Op->VectorList.RegNum = RegNum;
2076    Op->VectorList.Count = Count;
2077    Op->VectorList.LaneIndex = Index;
2078    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2079    Op->StartLoc = S;
2080    Op->EndLoc = E;
2081    return Op;
2082  }
2083
2084  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2085                                       MCContext &Ctx) {
2086    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2087    Op->VectorIndex.Val = Idx;
2088    Op->StartLoc = S;
2089    Op->EndLoc = E;
2090    return Op;
2091  }
2092
2093  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2094    ARMOperand *Op = new ARMOperand(k_Immediate);
2095    Op->Imm.Val = Val;
2096    Op->StartLoc = S;
2097    Op->EndLoc = E;
2098    return Op;
2099  }
2100
2101  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2102    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2103    Op->FPImm.Val = Val;
2104    Op->StartLoc = S;
2105    Op->EndLoc = S;
2106    return Op;
2107  }
2108
2109  static ARMOperand *CreateMem(unsigned BaseRegNum,
2110                               const MCConstantExpr *OffsetImm,
2111                               unsigned OffsetRegNum,
2112                               ARM_AM::ShiftOpc ShiftType,
2113                               unsigned ShiftImm,
2114                               unsigned Alignment,
2115                               bool isNegative,
2116                               SMLoc S, SMLoc E) {
2117    ARMOperand *Op = new ARMOperand(k_Memory);
2118    Op->Memory.BaseRegNum = BaseRegNum;
2119    Op->Memory.OffsetImm = OffsetImm;
2120    Op->Memory.OffsetRegNum = OffsetRegNum;
2121    Op->Memory.ShiftType = ShiftType;
2122    Op->Memory.ShiftImm = ShiftImm;
2123    Op->Memory.Alignment = Alignment;
2124    Op->Memory.isNegative = isNegative;
2125    Op->StartLoc = S;
2126    Op->EndLoc = E;
2127    return Op;
2128  }
2129
2130  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2131                                      ARM_AM::ShiftOpc ShiftTy,
2132                                      unsigned ShiftImm,
2133                                      SMLoc S, SMLoc E) {
2134    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2135    Op->PostIdxReg.RegNum = RegNum;
2136    Op->PostIdxReg.isAdd = isAdd;
2137    Op->PostIdxReg.ShiftTy = ShiftTy;
2138    Op->PostIdxReg.ShiftImm = ShiftImm;
2139    Op->StartLoc = S;
2140    Op->EndLoc = E;
2141    return Op;
2142  }
2143
2144  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2145    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2146    Op->MBOpt.Val = Opt;
2147    Op->StartLoc = S;
2148    Op->EndLoc = S;
2149    return Op;
2150  }
2151
2152  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2153    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2154    Op->IFlags.Val = IFlags;
2155    Op->StartLoc = S;
2156    Op->EndLoc = S;
2157    return Op;
2158  }
2159
2160  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2161    ARMOperand *Op = new ARMOperand(k_MSRMask);
2162    Op->MMask.Val = MMask;
2163    Op->StartLoc = S;
2164    Op->EndLoc = S;
2165    return Op;
2166  }
2167};
2168
2169} // end anonymous namespace.
2170
2171void ARMOperand::print(raw_ostream &OS) const {
2172  switch (Kind) {
2173  case k_FPImmediate:
2174    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2175       << ") >";
2176    break;
2177  case k_CondCode:
2178    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2179    break;
2180  case k_CCOut:
2181    OS << "<ccout " << getReg() << ">";
2182    break;
2183  case k_ITCondMask: {
2184    static const char *MaskStr[] = {
2185      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2186      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2187    };
2188    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2189    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2190    break;
2191  }
2192  case k_CoprocNum:
2193    OS << "<coprocessor number: " << getCoproc() << ">";
2194    break;
2195  case k_CoprocReg:
2196    OS << "<coprocessor register: " << getCoproc() << ">";
2197    break;
2198  case k_CoprocOption:
2199    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2200    break;
2201  case k_MSRMask:
2202    OS << "<mask: " << getMSRMask() << ">";
2203    break;
2204  case k_Immediate:
2205    getImm()->print(OS);
2206    break;
2207  case k_MemBarrierOpt:
2208    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2209    break;
2210  case k_Memory:
2211    OS << "<memory "
2212       << " base:" << Memory.BaseRegNum;
2213    OS << ">";
2214    break;
2215  case k_PostIndexRegister:
2216    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2217       << PostIdxReg.RegNum;
2218    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2219      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2220         << PostIdxReg.ShiftImm;
2221    OS << ">";
2222    break;
2223  case k_ProcIFlags: {
2224    OS << "<ARM_PROC::";
2225    unsigned IFlags = getProcIFlags();
2226    for (int i=2; i >= 0; --i)
2227      if (IFlags & (1 << i))
2228        OS << ARM_PROC::IFlagsToString(1 << i);
2229    OS << ">";
2230    break;
2231  }
2232  case k_Register:
2233    OS << "<register " << getReg() << ">";
2234    break;
2235  case k_ShifterImmediate:
2236    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2237       << " #" << ShifterImm.Imm << ">";
2238    break;
2239  case k_ShiftedRegister:
2240    OS << "<so_reg_reg "
2241       << RegShiftedReg.SrcReg << " "
2242       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2243       << " " << RegShiftedReg.ShiftReg << ">";
2244    break;
2245  case k_ShiftedImmediate:
2246    OS << "<so_reg_imm "
2247       << RegShiftedImm.SrcReg << " "
2248       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2249       << " #" << RegShiftedImm.ShiftImm << ">";
2250    break;
2251  case k_RotateImmediate:
2252    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2253    break;
2254  case k_BitfieldDescriptor:
2255    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2256       << ", width: " << Bitfield.Width << ">";
2257    break;
2258  case k_RegisterList:
2259  case k_DPRRegisterList:
2260  case k_SPRRegisterList: {
2261    OS << "<register_list ";
2262
2263    const SmallVectorImpl<unsigned> &RegList = getRegList();
2264    for (SmallVectorImpl<unsigned>::const_iterator
2265           I = RegList.begin(), E = RegList.end(); I != E; ) {
2266      OS << *I;
2267      if (++I < E) OS << ", ";
2268    }
2269
2270    OS << ">";
2271    break;
2272  }
2273  case k_VectorList:
2274    OS << "<vector_list " << VectorList.Count << " * "
2275       << VectorList.RegNum << ">";
2276    break;
2277  case k_VectorListAllLanes:
2278    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2279       << VectorList.RegNum << ">";
2280    break;
2281  case k_VectorListIndexed:
2282    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2283       << VectorList.Count << " * " << VectorList.RegNum << ">";
2284    break;
2285  case k_Token:
2286    OS << "'" << getToken() << "'";
2287    break;
2288  case k_VectorIndex:
2289    OS << "<vectorindex " << getVectorIndex() << ">";
2290    break;
2291  }
2292}
2293
2294/// @name Auto-generated Match Functions
2295/// {
2296
2297static unsigned MatchRegisterName(StringRef Name);
2298
2299/// }
2300
2301bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2302                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2303  StartLoc = Parser.getTok().getLoc();
2304  RegNo = tryParseRegister();
2305  EndLoc = Parser.getTok().getLoc();
2306
2307  return (RegNo == (unsigned)-1);
2308}
2309
2310/// Try to parse a register name.  The token must be an Identifier when called,
2311/// and if it is a register name the token is eaten and the register number is
2312/// returned.  Otherwise return -1.
2313///
2314int ARMAsmParser::tryParseRegister() {
2315  const AsmToken &Tok = Parser.getTok();
2316  if (Tok.isNot(AsmToken::Identifier)) return -1;
2317
2318  std::string lowerCase = Tok.getString().lower();
2319  unsigned RegNum = MatchRegisterName(lowerCase);
2320  if (!RegNum) {
2321    RegNum = StringSwitch<unsigned>(lowerCase)
2322      .Case("r13", ARM::SP)
2323      .Case("r14", ARM::LR)
2324      .Case("r15", ARM::PC)
2325      .Case("ip", ARM::R12)
2326      // Additional register name aliases for 'gas' compatibility.
2327      .Case("a1", ARM::R0)
2328      .Case("a2", ARM::R1)
2329      .Case("a3", ARM::R2)
2330      .Case("a4", ARM::R3)
2331      .Case("v1", ARM::R4)
2332      .Case("v2", ARM::R5)
2333      .Case("v3", ARM::R6)
2334      .Case("v4", ARM::R7)
2335      .Case("v5", ARM::R8)
2336      .Case("v6", ARM::R9)
2337      .Case("v7", ARM::R10)
2338      .Case("v8", ARM::R11)
2339      .Case("sb", ARM::R9)
2340      .Case("sl", ARM::R10)
2341      .Case("fp", ARM::R11)
2342      .Default(0);
2343  }
2344  if (!RegNum) {
2345    // Check for aliases registered via .req. Canonicalize to lower case.
2346    // That's more consistent since register names are case insensitive, and
2347    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2348    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2349    // If no match, return failure.
2350    if (Entry == RegisterReqs.end())
2351      return -1;
2352    Parser.Lex(); // Eat identifier token.
2353    return Entry->getValue();
2354  }
2355
2356  Parser.Lex(); // Eat identifier token.
2357
2358  return RegNum;
2359}
2360
2361// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
2362// If a recoverable error occurs, return 1. If an irrecoverable error
2363// occurs, return -1. An irrecoverable error is one where tokens have been
2364// consumed in the process of trying to parse the shifter (i.e., when it is
2365// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Recognize the shift mnemonic (case-insensitively); "asl" is accepted
  // as a synonym for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shifter at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, i.e. this returns 1
    // ("recoverable") even though the shift token and the previous operand
    // have already been consumed — confirm this is intended.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      // Register-specified shift amount.
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
                    "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Emit either a shifted-register or shifted-immediate operand; RRX always
  // takes the immediate form (its ShiftReg equals SrcReg, handled above).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}
2456
2457
2458/// Try to parse a register name.  The token must be an Identifier when called.
2459/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2460/// if there is a "writeback". 'true' if it's not a register.
2461///
2462/// TODO this is likely to change to allow different register types and or to
2463/// parse for a specific register type.
2464bool ARMAsmParser::
2465tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2466  SMLoc S = Parser.getTok().getLoc();
2467  int RegNo = tryParseRegister();
2468  if (RegNo == -1)
2469    return true;
2470
2471  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2472
2473  const AsmToken &ExclaimTok = Parser.getTok();
2474  if (ExclaimTok.is(AsmToken::Exclaim)) {
2475    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2476                                               ExclaimTok.getLoc()));
2477    Parser.Lex(); // Eat exclaim token
2478    return false;
2479  }
2480
2481  // Also check for an index operand. This is only legal for vector registers,
2482  // but that'll get caught OK in operand matching, so we don't need to
2483  // explicitly filter everything else out here.
2484  if (Parser.getTok().is(AsmToken::LBrac)) {
2485    SMLoc SIdx = Parser.getTok().getLoc();
2486    Parser.Lex(); // Eat left bracket token.
2487
2488    const MCExpr *ImmVal;
2489    if (getParser().ParseExpression(ImmVal))
2490      return MatchOperand_ParseFail;
2491    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2492    if (!MCE) {
2493      TokError("immediate value expected for vector index");
2494      return MatchOperand_ParseFail;
2495    }
2496
2497    SMLoc E = Parser.getTok().getLoc();
2498    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2499      Error(E, "']' expected");
2500      return MatchOperand_ParseFail;
2501    }
2502
2503    Parser.Lex(); // Eat right bracket token.
2504
2505    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2506                                                     SIdx, E,
2507                                                     getContext()));
2508  }
2509
2510  return false;
2511}
2512
2513/// MatchCoprocessorOperandName - Try to parse an coprocessor related
2514/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2515/// "c5", ...
2516static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2517  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2518  // but efficient.
2519  switch (Name.size()) {
2520  default: break;
2521  case 2:
2522    if (Name[0] != CoprocOp)
2523      return -1;
2524    switch (Name[1]) {
2525    default:  return -1;
2526    case '0': return 0;
2527    case '1': return 1;
2528    case '2': return 2;
2529    case '3': return 3;
2530    case '4': return 4;
2531    case '5': return 5;
2532    case '6': return 6;
2533    case '7': return 7;
2534    case '8': return 8;
2535    case '9': return 9;
2536    }
2537    break;
2538  case 3:
2539    if (Name[0] != CoprocOp || Name[1] != '1')
2540      return -1;
2541    switch (Name[2]) {
2542    default:  return -1;
2543    case '0': return 10;
2544    case '1': return 11;
2545    case '2': return 12;
2546    case '3': return 13;
2547    case '4': return 14;
2548    case '5': return 15;
2549    }
2550    break;
2551  }
2552
2553  return -1;
2554}
2555
2556/// parseITCondCode - Try to parse a condition code for an IT instruction.
2557ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2558parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2559  SMLoc S = Parser.getTok().getLoc();
2560  const AsmToken &Tok = Parser.getTok();
2561  if (!Tok.is(AsmToken::Identifier))
2562    return MatchOperand_NoMatch;
2563  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2564    .Case("eq", ARMCC::EQ)
2565    .Case("ne", ARMCC::NE)
2566    .Case("hs", ARMCC::HS)
2567    .Case("cs", ARMCC::HS)
2568    .Case("lo", ARMCC::LO)
2569    .Case("cc", ARMCC::LO)
2570    .Case("mi", ARMCC::MI)
2571    .Case("pl", ARMCC::PL)
2572    .Case("vs", ARMCC::VS)
2573    .Case("vc", ARMCC::VC)
2574    .Case("hi", ARMCC::HI)
2575    .Case("ls", ARMCC::LS)
2576    .Case("ge", ARMCC::GE)
2577    .Case("lt", ARMCC::LT)
2578    .Case("gt", ARMCC::GT)
2579    .Case("le", ARMCC::LE)
2580    .Case("al", ARMCC::AL)
2581    .Default(~0U);
2582  if (CC == ~0U)
2583    return MatchOperand_NoMatch;
2584  Parser.Lex(); // Eat the token.
2585
2586  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2587
2588  return MatchOperand_Success;
2589}
2590
2591/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
2592/// token must be an Identifier when called, and if it is a coprocessor
2593/// number, the token is eaten and the operand is added to the operand list.
2594ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2595parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2596  SMLoc S = Parser.getTok().getLoc();
2597  const AsmToken &Tok = Parser.getTok();
2598  if (Tok.isNot(AsmToken::Identifier))
2599    return MatchOperand_NoMatch;
2600
2601  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2602  if (Num == -1)
2603    return MatchOperand_NoMatch;
2604
2605  Parser.Lex(); // Eat identifier token.
2606  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2607  return MatchOperand_Success;
2608}
2609
2610/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
2611/// token must be an Identifier when called, and if it is a coprocessor
2612/// number, the token is eaten and the operand is added to the operand list.
2613ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2614parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2615  SMLoc S = Parser.getTok().getLoc();
2616  const AsmToken &Tok = Parser.getTok();
2617  if (Tok.isNot(AsmToken::Identifier))
2618    return MatchOperand_NoMatch;
2619
2620  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2621  if (Reg == -1)
2622    return MatchOperand_NoMatch;
2623
2624  Parser.Lex(); // Eat identifier token.
2625  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2626  return MatchOperand_Success;
2627}
2628
2629/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2630/// coproc_option : '{' imm0_255 '}'
2631ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2632parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2633  SMLoc S = Parser.getTok().getLoc();
2634
2635  // If this isn't a '{', this isn't a coprocessor immediate operand.
2636  if (Parser.getTok().isNot(AsmToken::LCurly))
2637    return MatchOperand_NoMatch;
2638  Parser.Lex(); // Eat the '{'
2639
2640  const MCExpr *Expr;
2641  SMLoc Loc = Parser.getTok().getLoc();
2642  if (getParser().ParseExpression(Expr)) {
2643    Error(Loc, "illegal expression");
2644    return MatchOperand_ParseFail;
2645  }
2646  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2647  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2648    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2649    return MatchOperand_ParseFail;
2650  }
2651  int Val = CE->getValue();
2652
2653  // Check for and consume the closing '}'
2654  if (Parser.getTok().isNot(AsmToken::RCurly))
2655    return MatchOperand_ParseFail;
2656  SMLoc E = Parser.getTok().getLoc();
2657  Parser.Lex(); // Eat the '}'
2658
2659  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2660  return MatchOperand_Success;
2661}
2662
2663// For register list parsing, we need to map from raw GPR register numbering
2664// to the enumeration values. The enumeration values aren't sorted by
2665// register number due to our using "sp", "lr" and "pc" as canonical names.
2666static unsigned getNextRegister(unsigned Reg) {
2667  // If this is a GPR, we need to do it manually, otherwise we can rely
2668  // on the sort ordering of the enumeration since the other reg-classes
2669  // are sane.
2670  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2671    return Reg + 1;
2672  switch(Reg) {
2673  default: assert(0 && "Invalid GPR number!");
2674  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2675  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2676  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2677  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2678  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2679  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2680  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2681  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2682  }
2683}
2684
2685// Return the low-subreg of a given Q register.
2686static unsigned getDRegFromQReg(unsigned QReg) {
2687  switch (QReg) {
2688  default: llvm_unreachable("expected a Q register!");
2689  case ARM::Q0:  return ARM::D0;
2690  case ARM::Q1:  return ARM::D2;
2691  case ARM::Q2:  return ARM::D4;
2692  case ARM::Q3:  return ARM::D6;
2693  case ARM::Q4:  return ARM::D8;
2694  case ARM::Q5:  return ARM::D10;
2695  case ARM::Q6:  return ARM::D12;
2696  case ARM::Q7:  return ARM::D14;
2697  case ARM::Q8:  return ARM::D16;
2698  case ARM::Q9:  return ARM::D18;
2699  case ARM::Q10: return ARM::D20;
2700  case ARM::Q11: return ARM::D22;
2701  case ARM::Q12: return ARM::D24;
2702  case ARM::Q13: return ARM::D26;
2703  case ARM::Q14: return ARM::D28;
2704  case ARM::Q15: return ARM::D30;
2705  }
2706}
2707
/// Parse a register list, e.g. "{r0, r4-r7, lr}".
///
/// The current token must be the opening '{'. On success the list (and an
/// optional trailing '^' system-variant marker) is consumed and the operands
/// are appended; returns false. On failure a diagnostic is emitted via
/// Error() and true is returned.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  // Push the low D register now; the high one (Reg+1 after the increment)
  // is pushed below as the "first" register of the list.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // The first register determines the register class for the whole list;
  // all subsequent registers must come from the same class.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range form: "<reg>-<reg>". 'Reg' is the already-pushed start.
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      // Compare by architectural register number, not enum value, since the
      // GPR enum isn't in numeric order (sp/lr/pc).
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    // Comma form: a single register follows.
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // An exact duplicate is only a warning (the register is simply not
    // added again), for compatibility.
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    // A Q register contributes its second (odd) D sub-register too.
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
2829
2830// Helper function to parse the lane index for vector lists.
2831ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2832parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2833  Index = 0; // Always return a defined index value.
2834  if (Parser.getTok().is(AsmToken::LBrac)) {
2835    Parser.Lex(); // Eat the '['.
2836    if (Parser.getTok().is(AsmToken::RBrac)) {
2837      // "Dn[]" is the 'all lanes' syntax.
2838      LaneKind = AllLanes;
2839      Parser.Lex(); // Eat the ']'.
2840      return MatchOperand_Success;
2841    }
2842    const MCExpr *LaneIndex;
2843    SMLoc Loc = Parser.getTok().getLoc();
2844    if (getParser().ParseExpression(LaneIndex)) {
2845      Error(Loc, "illegal expression");
2846      return MatchOperand_ParseFail;
2847    }
2848    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2849    if (!CE) {
2850      Error(Loc, "lane index must be empty or an integer");
2851      return MatchOperand_ParseFail;
2852    }
2853    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2854      Error(Parser.getTok().getLoc(), "']' expected");
2855      return MatchOperand_ParseFail;
2856    }
2857    Parser.Lex(); // Eat the ']'.
2858    int64_t Val = CE->getValue();
2859
2860    // FIXME: Make this range check context sensitive for .8, .16, .32.
2861    if (Val < 0 || Val > 7) {
2862      Error(Parser.getTok().getLoc(), "lane index out of range");
2863      return MatchOperand_ParseFail;
2864    }
2865    Index = Val;
2866    LaneKind = IndexedLane;
2867    return MatchOperand_Success;
2868  }
2869  LaneKind = NoLanes;
2870  return MatchOperand_Success;
2871}
2872
// parse a vector register list
//
// Accepts either a bare D/Q register (gas extension) or a curly-brace list
// of D/Q registers, with optional ranges ("d0-d3") and an optional lane
// specifier on each element ("d0[1]"), which must be identical across all
// elements. Tracks three pieces of state across the list: FirstReg (the
// first D register), Count (number of D registers), and Spacing (1 for
// single-spaced, 2 for double-spaced lists; 0 until it can be inferred).
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without encosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    SMLoc E = Parser.getTok().getLoc();
    // Bare D register: a one-element list, possibly with a lane suffix.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    // Bare Q register: treated as a two-element list of its D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  int Spacing = 0;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }
  // The first element's lane specifier sets the expectation for the rest.
  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range form: "d0-d3". Ranges are always single-spaced.
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      // NOTE(review): this 'continue' also skips the lane-specifier parse
      // below, so "{d0-d0[1]}" would not consume "[1]" here — confirm this
      // degenerate range is intended to behave that way.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      EndLoc = Parser.getTok().getLoc();

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    // Comma form: a single D or Q register follows.
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // A Q register forces single spacing (its two D halves are adjacent).
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc EndLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  // Build the operand flavor that matches the lane specifier seen on the
  // list elements.
  switch (LaneKind) {
  case NoLanes:
    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            (Spacing == 2),
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
3103
3104/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3105ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3106parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3107  SMLoc S = Parser.getTok().getLoc();
3108  const AsmToken &Tok = Parser.getTok();
3109  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3110  StringRef OptStr = Tok.getString();
3111
3112  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3113    .Case("sy",    ARM_MB::SY)
3114    .Case("st",    ARM_MB::ST)
3115    .Case("sh",    ARM_MB::ISH)
3116    .Case("ish",   ARM_MB::ISH)
3117    .Case("shst",  ARM_MB::ISHST)
3118    .Case("ishst", ARM_MB::ISHST)
3119    .Case("nsh",   ARM_MB::NSH)
3120    .Case("un",    ARM_MB::NSH)
3121    .Case("nshst", ARM_MB::NSHST)
3122    .Case("unst",  ARM_MB::NSHST)
3123    .Case("osh",   ARM_MB::OSH)
3124    .Case("oshst", ARM_MB::OSHST)
3125    .Default(~0U);
3126
3127  if (Opt == ~0U)
3128    return MatchOperand_NoMatch;
3129
3130  Parser.Lex(); // Eat identifier token.
3131  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3132  return MatchOperand_Success;
3133}
3134
3135/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3136ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3137parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3138  SMLoc S = Parser.getTok().getLoc();
3139  const AsmToken &Tok = Parser.getTok();
3140  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3141  StringRef IFlagsStr = Tok.getString();
3142
3143  // An iflags string of "none" is interpreted to mean that none of the AIF
3144  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3145  unsigned IFlags = 0;
3146  if (IFlagsStr != "none") {
3147        for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3148      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3149        .Case("a", ARM_PROC::A)
3150        .Case("i", ARM_PROC::I)
3151        .Case("f", ARM_PROC::F)
3152        .Default(~0U);
3153
3154      // If some specific iflag is already set, it means that some letter is
3155      // present more than once, this is not acceptable.
3156      if (Flag == ~0U || (IFlags & Flag))
3157        return MatchOperand_NoMatch;
3158
3159      IFlags |= Flag;
3160    }
3161  }
3162
3163  Parser.Lex(); // Eat identifier token.
3164  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3165  return MatchOperand_Success;
3166}
3167
/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
///
/// Two encoding schemes are handled: M-class cores use named system
/// registers (apsr, msp, primask, ...) mapped directly to a SYSm value;
/// A/R-class cores use "cpsr"/"spsr"/"apsr" plus a "_<flags>" suffix packed
/// as a 5-bit value (bits 3-0 = field mask, bit 4 = spsr).
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef Mask = Tok.getString();

  if (isMClass()) {
    // See ARMv6-M 10.1.1
    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
      .Case("apsr", 0)
      .Case("iapsr", 1)
      .Case("eapsr", 2)
      .Case("xpsr", 3)
      .Case("ipsr", 5)
      .Case("epsr", 6)
      .Case("iepsr", 7)
      .Case("msp", 8)
      .Case("psp", 9)
      .Case("primask", 16)
      .Case("basepri", 17)
      .Case("basepri_max", 18)
      .Case("faultmask", 19)
      .Case("control", 20)
      .Default(~0U);

    if (FlagsVal == ~0U)
      return MatchOperand_NoMatch;

    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
      // basepri, basepri_max and faultmask only valid for V7m.
      return MatchOperand_NoMatch;

    Parser.Lex(); // Eat identifier token.
    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
    return MatchOperand_Success;
  }

  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
  size_t Start = 0, Next = Mask.find('_');
  StringRef Flags = "";
  std::string SpecReg = Mask.slice(Start, Next).lower();
  if (Next != StringRef::npos)
    Flags = Mask.slice(Next+1, Mask.size());

  // FlagsVal contains the complete mask:
  // 3-0: Mask
  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  unsigned FlagsVal = 0;

  if (SpecReg == "apsr") {
    // apsr only accepts the fixed spellings below, which alias onto the
    // corresponding cpsr field masks.
    FlagsVal = StringSwitch<unsigned>(Flags)
    .Case("nzcvq",  0x8) // same as CPSR_f
    .Case("g",      0x4) // same as CPSR_s
    .Case("nzcvqg", 0xc) // same as CPSR_fs
    .Default(~0U);

    if (FlagsVal == ~0U) {
      if (!Flags.empty())
        return MatchOperand_NoMatch;
      else
        FlagsVal = 8; // No flag
    }
  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
      Flags = "fc";
    // Each flag letter sets one bit of the field mask; a repeated letter
    // (bit already set) is rejected.
    for (int i = 0, e = Flags.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
      .Case("c", 1)
      .Case("x", 2)
      .Case("s", 4)
      .Case("f", 8)
      .Default(~0U);

      // If some specific flag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (FlagsVal == ~0U || (FlagsVal & Flag))
        return MatchOperand_NoMatch;
      FlagsVal |= Flag;
    }
  } else // No match for special register.
    return MatchOperand_NoMatch;

  // Special register without flags is NOT equivalent to "fc" flags.
  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
  // two lines would enable gas compatibility at the expense of breaking
  // round-tripping.
  //
  // if (!FlagsVal)
  //  FlagsVal = 0x9;

  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
  if (SpecReg == "spsr")
    FlagsVal |= 16;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
  return MatchOperand_Success;
}
3268
3269ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3270parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3271            int Low, int High) {
3272  const AsmToken &Tok = Parser.getTok();
3273  if (Tok.isNot(AsmToken::Identifier)) {
3274    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3275    return MatchOperand_ParseFail;
3276  }
3277  StringRef ShiftName = Tok.getString();
3278  std::string LowerOp = Op.lower();
3279  std::string UpperOp = Op.upper();
3280  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3281    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3282    return MatchOperand_ParseFail;
3283  }
3284  Parser.Lex(); // Eat shift type token.
3285
3286  // There must be a '#' and a shift amount.
3287  if (Parser.getTok().isNot(AsmToken::Hash) &&
3288      Parser.getTok().isNot(AsmToken::Dollar)) {
3289    Error(Parser.getTok().getLoc(), "'#' expected");
3290    return MatchOperand_ParseFail;
3291  }
3292  Parser.Lex(); // Eat hash token.
3293
3294  const MCExpr *ShiftAmount;
3295  SMLoc Loc = Parser.getTok().getLoc();
3296  if (getParser().ParseExpression(ShiftAmount)) {
3297    Error(Loc, "illegal expression");
3298    return MatchOperand_ParseFail;
3299  }
3300  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3301  if (!CE) {
3302    Error(Loc, "constant expression expected");
3303    return MatchOperand_ParseFail;
3304  }
3305  int Val = CE->getValue();
3306  if (Val < Low || Val > High) {
3307    Error(Loc, "immediate value out of range");
3308    return MatchOperand_ParseFail;
3309  }
3310
3311  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3312
3313  return MatchOperand_Success;
3314}
3315
3316ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3317parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3318  const AsmToken &Tok = Parser.getTok();
3319  SMLoc S = Tok.getLoc();
3320  if (Tok.isNot(AsmToken::Identifier)) {
3321    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3322    return MatchOperand_ParseFail;
3323  }
3324  int Val = StringSwitch<int>(Tok.getString())
3325    .Case("be", 1)
3326    .Case("le", 0)
3327    .Default(-1);
3328  Parser.Lex(); // Eat the token.
3329
3330  if (Val == -1) {
3331    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3332    return MatchOperand_ParseFail;
3333  }
3334  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3335                                                                  getContext()),
3336                                           S, Parser.getTok().getLoc()));
3337  return MatchOperand_Success;
3338}
3339
3340/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3341/// instructions. Legal values are:
3342///     lsl #n  'n' in [0,31]
3343///     asr #n  'n' in [1,32]
3344///             n == 32 encoded as n == 0.
3345ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3346parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3347  const AsmToken &Tok = Parser.getTok();
3348  SMLoc S = Tok.getLoc();
3349  if (Tok.isNot(AsmToken::Identifier)) {
3350    Error(S, "shift operator 'asr' or 'lsl' expected");
3351    return MatchOperand_ParseFail;
3352  }
3353  StringRef ShiftName = Tok.getString();
3354  bool isASR;
3355  if (ShiftName == "lsl" || ShiftName == "LSL")
3356    isASR = false;
3357  else if (ShiftName == "asr" || ShiftName == "ASR")
3358    isASR = true;
3359  else {
3360    Error(S, "shift operator 'asr' or 'lsl' expected");
3361    return MatchOperand_ParseFail;
3362  }
3363  Parser.Lex(); // Eat the operator.
3364
3365  // A '#' and a shift amount.
3366  if (Parser.getTok().isNot(AsmToken::Hash) &&
3367      Parser.getTok().isNot(AsmToken::Dollar)) {
3368    Error(Parser.getTok().getLoc(), "'#' expected");
3369    return MatchOperand_ParseFail;
3370  }
3371  Parser.Lex(); // Eat hash token.
3372
3373  const MCExpr *ShiftAmount;
3374  SMLoc E = Parser.getTok().getLoc();
3375  if (getParser().ParseExpression(ShiftAmount)) {
3376    Error(E, "malformed shift expression");
3377    return MatchOperand_ParseFail;
3378  }
3379  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3380  if (!CE) {
3381    Error(E, "shift amount must be an immediate");
3382    return MatchOperand_ParseFail;
3383  }
3384
3385  int64_t Val = CE->getValue();
3386  if (isASR) {
3387    // Shift amount must be in [1,32]
3388    if (Val < 1 || Val > 32) {
3389      Error(E, "'asr' shift amount must be in range [1,32]");
3390      return MatchOperand_ParseFail;
3391    }
3392    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3393    if (isThumb() && Val == 32) {
3394      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3395      return MatchOperand_ParseFail;
3396    }
3397    if (Val == 32) Val = 0;
3398  } else {
3399    // Shift amount must be in [1,32]
3400    if (Val < 0 || Val > 31) {
3401      Error(E, "'lsr' shift amount must be in range [0,31]");
3402      return MatchOperand_ParseFail;
3403    }
3404  }
3405
3406  E = Parser.getTok().getLoc();
3407  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3408
3409  return MatchOperand_Success;
3410}
3411
3412/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3413/// of instructions. Legal values are:
3414///     ror #n  'n' in {0, 8, 16, 24}
3415ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3416parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3417  const AsmToken &Tok = Parser.getTok();
3418  SMLoc S = Tok.getLoc();
3419  if (Tok.isNot(AsmToken::Identifier))
3420    return MatchOperand_NoMatch;
3421  StringRef ShiftName = Tok.getString();
3422  if (ShiftName != "ror" && ShiftName != "ROR")
3423    return MatchOperand_NoMatch;
3424  Parser.Lex(); // Eat the operator.
3425
3426  // A '#' and a rotate amount.
3427  if (Parser.getTok().isNot(AsmToken::Hash) &&
3428      Parser.getTok().isNot(AsmToken::Dollar)) {
3429    Error(Parser.getTok().getLoc(), "'#' expected");
3430    return MatchOperand_ParseFail;
3431  }
3432  Parser.Lex(); // Eat hash token.
3433
3434  const MCExpr *ShiftAmount;
3435  SMLoc E = Parser.getTok().getLoc();
3436  if (getParser().ParseExpression(ShiftAmount)) {
3437    Error(E, "malformed rotate expression");
3438    return MatchOperand_ParseFail;
3439  }
3440  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3441  if (!CE) {
3442    Error(E, "rotate amount must be an immediate");
3443    return MatchOperand_ParseFail;
3444  }
3445
3446  int64_t Val = CE->getValue();
3447  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
3448  // normally, zero is represented in asm by omitting the rotate operand
3449  // entirely.
3450  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3451    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3452    return MatchOperand_ParseFail;
3453  }
3454
3455  E = Parser.getTok().getLoc();
3456  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3457
3458  return MatchOperand_Success;
3459}
3460
/// parseBitfield - Parse a bitfield descriptor operand of the form
/// "#lsb, #width" (two hash-prefixed constant immediates separated by a
/// comma), pushing a single combined bitfield operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31].
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the comma.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  if (getParser().ParseExpression(WidthExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb].
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));

  return MatchOperand_Success;
}
3528
3529ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3530parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3531  // Check for a post-index addressing register operand. Specifically:
3532  // postidx_reg := '+' register {, shift}
3533  //              | '-' register {, shift}
3534  //              | register {, shift}
3535
3536  // This method must return MatchOperand_NoMatch without consuming any tokens
3537  // in the case where there is no match, as other alternatives take other
3538  // parse methods.
3539  AsmToken Tok = Parser.getTok();
3540  SMLoc S = Tok.getLoc();
3541  bool haveEaten = false;
3542  bool isAdd = true;
3543  int Reg = -1;
3544  if (Tok.is(AsmToken::Plus)) {
3545    Parser.Lex(); // Eat the '+' token.
3546    haveEaten = true;
3547  } else if (Tok.is(AsmToken::Minus)) {
3548    Parser.Lex(); // Eat the '-' token.
3549    isAdd = false;
3550    haveEaten = true;
3551  }
3552  if (Parser.getTok().is(AsmToken::Identifier))
3553    Reg = tryParseRegister();
3554  if (Reg == -1) {
3555    if (!haveEaten)
3556      return MatchOperand_NoMatch;
3557    Error(Parser.getTok().getLoc(), "register expected");
3558    return MatchOperand_ParseFail;
3559  }
3560  SMLoc E = Parser.getTok().getLoc();
3561
3562  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3563  unsigned ShiftImm = 0;
3564  if (Parser.getTok().is(AsmToken::Comma)) {
3565    Parser.Lex(); // Eat the ','.
3566    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3567      return MatchOperand_ParseFail;
3568  }
3569
3570  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3571                                                  ShiftImm, S, E));
3572
3573  return MatchOperand_Success;
3574}
3575
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat the '#'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // NOTE(review): E is the location of the token saved at entry (the '#'),
    // so the operand's end location precedes the immediate text -- confirm
    // this is intended.
    SMLoc E = Tok.getLoc();
    // Negative zero is encoded as the flag value INT32_MIN so that "#-0" and
    // "#0" remain distinguishable (subtract vs. add of a zero offset).
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = INT32_MIN;

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }


  // Otherwise expect an optionally-signed register, as in parsePostIdxReg
  // (but without the trailing shift).
  bool haveEaten = false;
  bool isAdd = true;
  int Reg = -1;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    // Only back out without error if no tokens have been consumed yet.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, E));

  return MatchOperand_Success;
}
3646
3647/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3648/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3649/// when they refer multiple MIOperands inside a single one.
3650bool ARMAsmParser::
3651cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3652             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3653  // Rt, Rt2
3654  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3655  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3656  // Create a writeback register dummy placeholder.
3657  Inst.addOperand(MCOperand::CreateReg(0));
3658  // addr
3659  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3660  // pred
3661  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3662  return true;
3663}
3664
3665/// cvtT2StrdPre - Convert parsed operands to MCInst.
3666/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3667/// when they refer multiple MIOperands inside a single one.
3668bool ARMAsmParser::
3669cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3670             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3671  // Create a writeback register dummy placeholder.
3672  Inst.addOperand(MCOperand::CreateReg(0));
3673  // Rt, Rt2
3674  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3675  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3676  // addr
3677  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3678  // pred
3679  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3680  return true;
3681}
3682
3683/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3684/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3685/// when they refer multiple MIOperands inside a single one.
3686bool ARMAsmParser::
3687cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3688                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3689  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3690
3691  // Create a writeback register dummy placeholder.
3692  Inst.addOperand(MCOperand::CreateImm(0));
3693
3694  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3695  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3696  return true;
3697}
3698
3699/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3700/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3701/// when they refer multiple MIOperands inside a single one.
3702bool ARMAsmParser::
3703cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3704                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3705  // Create a writeback register dummy placeholder.
3706  Inst.addOperand(MCOperand::CreateImm(0));
3707  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3708  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3709  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3710  return true;
3711}
3712
3713/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3714/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3715/// when they refer multiple MIOperands inside a single one.
3716bool ARMAsmParser::
3717cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3718                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3719  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3720
3721  // Create a writeback register dummy placeholder.
3722  Inst.addOperand(MCOperand::CreateImm(0));
3723
3724  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3725  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3726  return true;
3727}
3728
3729/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3730/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3731/// when they refer multiple MIOperands inside a single one.
3732bool ARMAsmParser::
3733cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3734                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3735  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3736
3737  // Create a writeback register dummy placeholder.
3738  Inst.addOperand(MCOperand::CreateImm(0));
3739
3740  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3741  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3742  return true;
3743}
3744
3745
3746/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3747/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3748/// when they refer multiple MIOperands inside a single one.
3749bool ARMAsmParser::
3750cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3751                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3752  // Create a writeback register dummy placeholder.
3753  Inst.addOperand(MCOperand::CreateImm(0));
3754  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3755  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3756  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3757  return true;
3758}
3759
3760/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3761/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3762/// when they refer multiple MIOperands inside a single one.
3763bool ARMAsmParser::
3764cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3765                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3766  // Create a writeback register dummy placeholder.
3767  Inst.addOperand(MCOperand::CreateImm(0));
3768  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3769  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3770  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3771  return true;
3772}
3773
3774/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3775/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3776/// when they refer multiple MIOperands inside a single one.
3777bool ARMAsmParser::
3778cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3779                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3780  // Create a writeback register dummy placeholder.
3781  Inst.addOperand(MCOperand::CreateImm(0));
3782  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3783  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3784  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3785  return true;
3786}
3787
3788/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3789/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3790/// when they refer multiple MIOperands inside a single one.
3791bool ARMAsmParser::
3792cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3793                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3794  // Rt
3795  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3796  // Create a writeback register dummy placeholder.
3797  Inst.addOperand(MCOperand::CreateImm(0));
3798  // addr
3799  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3800  // offset
3801  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3802  // pred
3803  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3804  return true;
3805}
3806
3807/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3808/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3809/// when they refer multiple MIOperands inside a single one.
3810bool ARMAsmParser::
3811cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3812                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3813  // Rt
3814  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3815  // Create a writeback register dummy placeholder.
3816  Inst.addOperand(MCOperand::CreateImm(0));
3817  // addr
3818  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3819  // offset
3820  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3821  // pred
3822  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3823  return true;
3824}
3825
3826/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3827/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3828/// when they refer multiple MIOperands inside a single one.
3829bool ARMAsmParser::
3830cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3831                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3832  // Create a writeback register dummy placeholder.
3833  Inst.addOperand(MCOperand::CreateImm(0));
3834  // Rt
3835  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3836  // addr
3837  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3838  // offset
3839  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3840  // pred
3841  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3842  return true;
3843}
3844
3845/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3846/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3847/// when they refer multiple MIOperands inside a single one.
3848bool ARMAsmParser::
3849cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3850                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3851  // Create a writeback register dummy placeholder.
3852  Inst.addOperand(MCOperand::CreateImm(0));
3853  // Rt
3854  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3855  // addr
3856  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3857  // offset
3858  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3859  // pred
3860  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3861  return true;
3862}
3863
3864/// cvtLdrdPre - Convert parsed operands to MCInst.
3865/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3866/// when they refer multiple MIOperands inside a single one.
3867bool ARMAsmParser::
3868cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3869           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3870  // Rt, Rt2
3871  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3872  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3873  // Create a writeback register dummy placeholder.
3874  Inst.addOperand(MCOperand::CreateImm(0));
3875  // addr
3876  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3877  // pred
3878  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3879  return true;
3880}
3881
3882/// cvtStrdPre - Convert parsed operands to MCInst.
3883/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3884/// when they refer multiple MIOperands inside a single one.
3885bool ARMAsmParser::
3886cvtStrdPre(MCInst &Inst, unsigned Opcode,
3887           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3888  // Create a writeback register dummy placeholder.
3889  Inst.addOperand(MCOperand::CreateImm(0));
3890  // Rt, Rt2
3891  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3892  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3893  // addr
3894  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3895  // pred
3896  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3897  return true;
3898}
3899
3900/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3901/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3902/// when they refer multiple MIOperands inside a single one.
3903bool ARMAsmParser::
3904cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3905                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3906  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3907  // Create a writeback register dummy placeholder.
3908  Inst.addOperand(MCOperand::CreateImm(0));
3909  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3910  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3911  return true;
3912}
3913
/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  // Rd.
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Optional CPSR-def operand (cc_out, the 's' suffix).
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // The second source is tied to Rd: duplicate the destination operand.
  Inst.addOperand(Inst.getOperand(0));
  // Predicate operands.
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}
3946
3947bool ARMAsmParser::
3948cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3949              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3950  // Vd
3951  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3952  // Create a writeback register dummy placeholder.
3953  Inst.addOperand(MCOperand::CreateImm(0));
3954  // Vn
3955  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3956  // pred
3957  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3958  return true;
3959}
3960
3961bool ARMAsmParser::
3962cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3963                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3964  // Vd
3965  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3966  // Create a writeback register dummy placeholder.
3967  Inst.addOperand(MCOperand::CreateImm(0));
3968  // Vn
3969  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3970  // Vm
3971  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3972  // pred
3973  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3974  return true;
3975}
3976
3977bool ARMAsmParser::
3978cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3979              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3980  // Create a writeback register dummy placeholder.
3981  Inst.addOperand(MCOperand::CreateImm(0));
3982  // Vn
3983  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3984  // Vt
3985  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3986  // pred
3987  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3988  return true;
3989}
3990
3991bool ARMAsmParser::
3992cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3993                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3994  // Create a writeback register dummy placeholder.
3995  Inst.addOperand(MCOperand::CreateImm(0));
3996  // Vn
3997  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3998  // Vm
3999  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4000  // Vt
4001  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4002  // pred
4003  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4004  return true;
4005}
4006
/// Parse an ARM memory expression, return false if successful else return true
/// or an error.  The first token must be a '[' when called.
///
/// Accepts the following forms (each optionally followed by a '!' writeback
/// marker, which is pushed as a separate token operand):
///   [Rn]                       plain base register
///   [Rn, :align]               alignment specifier (16/32/64/128/256 bits)
///   [Rn, #imm]                 immediate offset ('$imm' or a bare integer too)
///   [Rn, {+|-}Rm {, shift}]    register offset with optional shift
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // [Rn] -- base register only, no offset.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Translate the alignment bit count into the byte alignment carried by
    // the memory operand.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN (a sentinel: as a
    // plain integer, -0 == 0 and the '-' would otherwise be lost).
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
4189
4190/// parseMemRegOffsetShift - one of these two:
4191///   ( lsl | lsr | asr | ror ) , # shift_amount
4192///   rrx
4193/// return true if it parses a shift otherwise it returns false.
4194bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4195                                          unsigned &Amount) {
4196  SMLoc Loc = Parser.getTok().getLoc();
4197  const AsmToken &Tok = Parser.getTok();
4198  if (Tok.isNot(AsmToken::Identifier))
4199    return true;
4200  StringRef ShiftName = Tok.getString();
4201  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4202      ShiftName == "asl" || ShiftName == "ASL")
4203    St = ARM_AM::lsl;
4204  else if (ShiftName == "lsr" || ShiftName == "LSR")
4205    St = ARM_AM::lsr;
4206  else if (ShiftName == "asr" || ShiftName == "ASR")
4207    St = ARM_AM::asr;
4208  else if (ShiftName == "ror" || ShiftName == "ROR")
4209    St = ARM_AM::ror;
4210  else if (ShiftName == "rrx" || ShiftName == "RRX")
4211    St = ARM_AM::rrx;
4212  else
4213    return Error(Loc, "illegal shift operator");
4214  Parser.Lex(); // Eat shift type token.
4215
4216  // rrx stands alone.
4217  Amount = 0;
4218  if (St != ARM_AM::rrx) {
4219    Loc = Parser.getTok().getLoc();
4220    // A '#' and a shift amount.
4221    const AsmToken &HashTok = Parser.getTok();
4222    if (HashTok.isNot(AsmToken::Hash) &&
4223        HashTok.isNot(AsmToken::Dollar))
4224      return Error(HashTok.getLoc(), "'#' expected");
4225    Parser.Lex(); // Eat hash token.
4226
4227    const MCExpr *Expr;
4228    if (getParser().ParseExpression(Expr))
4229      return true;
4230    // Range check the immediate.
4231    // lsl, ror: 0 <= imm <= 31
4232    // lsr, asr: 0 <= imm <= 32
4233    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4234    if (!CE)
4235      return Error(Loc, "shift amount must be an immediate");
4236    int64_t Imm = CE->getValue();
4237    if (Imm < 0 ||
4238        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4239        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4240      return Error(Loc, "immediate shift value out of range");
4241    Amount = Imm;
4242  }
4243
4244  return false;
4245}
4246
/// parseFPImm - A floating point immediate expression operand.
/// Accepts either a real literal (encoded through ARM_AM::getFP64Imm) or an
/// integer that is taken as the already-encoded 8-bit immediate. Only fires
/// for the ".f32"/".f64" forms of VMOV; everything else is NoMatch so the
/// generic matcher can handle it.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  // An FP immediate must start with '#' (or the gas-compatible '$').
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm returns -1 if the value is not representable as an 8-bit
    // VFP/NEON FP immediate encoding.
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      TokError("floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  // An integer token is taken as the pre-encoded 8-bit immediate value.
  // NOTE(review): isNegative is ignored on this path, so "#-<int>" silently
  // drops the sign -- confirm whether a negative here should be rejected.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
4308/// Parse a arm instruction operand.  For now this parses the operand regardless
4309/// of the mnemonic.
4310bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4311                                StringRef Mnemonic) {
4312  SMLoc S, E;
4313
4314  // Check if the current operand has a custom associated parser, if so, try to
4315  // custom parse the operand, or fallback to the general approach.
4316  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4317  if (ResTy == MatchOperand_Success)
4318    return false;
4319  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4320  // there was a match, but an error occurred, in which case, just return that
4321  // the operand parsing failed.
4322  if (ResTy == MatchOperand_ParseFail)
4323    return true;
4324
4325  switch (getLexer().getKind()) {
4326  default:
4327    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4328    return true;
4329  case AsmToken::Identifier: {
4330    if (!tryParseRegisterWithWriteBack(Operands))
4331      return false;
4332    int Res = tryParseShiftRegister(Operands);
4333    if (Res == 0) // success
4334      return false;
4335    else if (Res == -1) // irrecoverable error
4336      return true;
4337    // If this is VMRS, check for the apsr_nzcv operand.
4338    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4339      S = Parser.getTok().getLoc();
4340      Parser.Lex();
4341      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4342      return false;
4343    }
4344
4345    // Fall though for the Identifier case that is not a register or a
4346    // special name.
4347  }
4348  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4349  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4350  case AsmToken::String:  // quoted label names.
4351  case AsmToken::Dot: {   // . as a branch target
4352    // This was not a register so parse other operands that start with an
4353    // identifier (like labels) as expressions and create them as immediates.
4354    const MCExpr *IdVal;
4355    S = Parser.getTok().getLoc();
4356    if (getParser().ParseExpression(IdVal))
4357      return true;
4358    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4359    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4360    return false;
4361  }
4362  case AsmToken::LBrac:
4363    return parseMemory(Operands);
4364  case AsmToken::LCurly:
4365    return parseRegisterList(Operands);
4366  case AsmToken::Dollar:
4367  case AsmToken::Hash: {
4368    // #42 -> immediate.
4369    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4370    S = Parser.getTok().getLoc();
4371    Parser.Lex();
4372    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4373    const MCExpr *ImmVal;
4374    if (getParser().ParseExpression(ImmVal))
4375      return true;
4376    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4377    if (CE) {
4378      int32_t Val = CE->getValue();
4379      if (isNegative && Val == 0)
4380        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4381    }
4382    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4383    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4384    return false;
4385  }
4386  case AsmToken::Colon: {
4387    // ":lower16:" and ":upper16:" expression prefixes
4388    // FIXME: Check it's an expression prefix,
4389    // e.g. (FOO - :lower16:BAR) isn't legal.
4390    ARMMCExpr::VariantKind RefKind;
4391    if (parsePrefix(RefKind))
4392      return true;
4393
4394    const MCExpr *SubExprVal;
4395    if (getParser().ParseExpression(SubExprVal))
4396      return true;
4397
4398    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4399                                                   getContext());
4400    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4401    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4402    return false;
4403  }
4404  }
4405}
4406
4407// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4408//  :lower16: and :upper16:.
4409bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4410  RefKind = ARMMCExpr::VK_ARM_None;
4411
4412  // :lower16: and :upper16: modifiers
4413  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4414  Parser.Lex(); // Eat ':'
4415
4416  if (getLexer().isNot(AsmToken::Identifier)) {
4417    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4418    return true;
4419  }
4420
4421  StringRef IDVal = Parser.getTok().getIdentifier();
4422  if (IDVal == "lower16") {
4423    RefKind = ARMMCExpr::VK_ARM_LO16;
4424  } else if (IDVal == "upper16") {
4425    RefKind = ARMMCExpr::VK_ARM_HI16;
4426  } else {
4427    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4428    return true;
4429  }
4430  Parser.Lex();
4431
4432  if (getLexer().isNot(AsmToken::Colon)) {
4433    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4434    return true;
4435  }
4436  Parser.Lex(); // Eat the last ':'
4437  return false;
4438}
4439
/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// On return: \p PredicationCode holds the ARMCC condition split off the end
/// (ARMCC::AL if none); \p CarrySetting is true if a trailing 's' was split
/// off; \p ProcessorIMod holds a "cps" interrupt-mode suffix (0 if none);
/// \p ITMask receives the condition-mask letters when the mnemonic is an
/// "it" instruction (otherwise it is left untouched). The return value is
/// the mnemonic with all recognized suffixes removed.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // Look at the final two characters for a condition-code suffix.
    // (StringRef::substr clamps an out-of-range start offset, so mnemonics
    // shorter than two characters are safe here.)
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}
4538
4539/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4540/// inclusion of carry set or predication code operands.
4541//
4542// FIXME: It would be nice to autogen this.
4543void ARMAsmParser::
4544getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4545                      bool &CanAcceptPredicationCode) {
4546  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4547      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4548      Mnemonic == "add" || Mnemonic == "adc" ||
4549      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4550      Mnemonic == "orr" || Mnemonic == "mvn" ||
4551      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4552      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4553      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4554                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4555                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4556    CanAcceptCarrySet = true;
4557  } else
4558    CanAcceptCarrySet = false;
4559
4560  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4561      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4562      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4563      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4564      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4565      (Mnemonic == "clrex" && !isThumb()) ||
4566      (Mnemonic == "nop" && isThumbOne()) ||
4567      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4568        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4569        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4570      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4571       !isThumb()) ||
4572      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4573    CanAcceptPredicationCode = false;
4574  } else
4575    CanAcceptPredicationCode = true;
4576
4577  if (isThumb()) {
4578    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4579        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4580      CanAcceptPredicationCode = false;
4581  }
4582}
4583
/// Decide whether the defaulted (non-setting) cc_out operand, parsed as
/// Operands[1] with register 0, should be removed so that the instruction
/// can match an encoding that has no cc_out operand at all. Returns true if
/// the operand should be removed.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs  or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // No special case matched; keep the cc_out operand.
  return false;
}
4708
4709static bool isDataTypeToken(StringRef Tok) {
4710  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4711    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4712    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4713    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4714    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4715    Tok == ".f" || Tok == ".d";
4716}
4717
4718// FIXME: This bit should probably be handled via an explicit match class
4719// in the .td files that matches the suffix instead of having it be
4720// a literal string token the way it is now.
4721static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4722  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4723}
4724
static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
/// Parse an arm instruction mnemonic followed by its operands.
///
/// Splits the mnemonic into its base form plus predication / carry-setting /
/// IT-mask / imod suffixes, pushes the corresponding implicit operands, then
/// parses the explicit comma-separated operand list. Finally applies several
/// operand-list fixups so the table-driven matcher sees the expected form.
/// Returns true on error (a diagnostic has been emitted).
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  unsigned AvailableFeatures = getAvailableFeatures();
  applyMnemonicAliases(Name, AvailableFeatures);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the 'instruction'.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    // Loc points just past the "it" characters of the mnemonic.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the mask from the last suffix character to the first; the
    // initial '8' provides the trailing '1' terminator bit of the mask.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The condition code suffix follows the mnemonic and, if present, the
    // 's' carry-setting suffix (hence the '+ CarrySetting' below).
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // The ".n" (narrow encoding) suffix is dropped here rather than kept
    // as a token operand.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here. Take care not to convert those
  // that should hit the Thumb2 encoding.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0 &&
        (isThumbOne() ||
         // The cc_out operand matches the IT block.
         ((inITBlock() != CarrySetting) &&
         // Neither register operand is a high register.
         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}
4960
4961// Validate context-sensitive operand constraints.
4962
4963// return 'true' if register list contains non-low GPR registers,
4964// 'false' otherwise. If Reg is in the register list or is HiReg, set
4965// 'containsReg' to true.
4966static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4967                                 unsigned HiReg, bool &containsReg) {
4968  containsReg = false;
4969  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4970    unsigned OpReg = Inst.getOperand(i).getReg();
4971    if (OpReg == Reg)
4972      containsReg = true;
4973    // Anything other than a low register isn't legal here.
4974    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4975      return true;
4976  }
4977  return false;
4978}
4979
4980// Check if the specified regisgter is in the register list of the inst,
4981// starting at the indicated operand number.
4982static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4983  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4984    unsigned OpReg = Inst.getOperand(i).getReg();
4985    if (OpReg == Reg)
4986      return true;
4987  }
4988  return false;
4989}
4990
// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern const MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for an opcode directly in the tblgen'erated
// ARMInsts table.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}
5000
// FIXME: We would really like to be able to tablegen'erate this.
/// Validate context-sensitive operand constraints that the table-driven
/// matcher cannot express: IT-block predication consistency, sequential
/// register-pair requirements (LDRD/STRD/LDREXD/STREXD), bitfield width
/// ranges, and Thumb register-list / writeback legality. Returns true
/// (with a diagnostic emitted) on error.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable.  It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      // Extract this instruction's then/else bit from the IT mask.
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1.
    // The source registers start at operand 1 here (operand 0 is the
    // writeback/status destination).
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}
5150
// Map a pseudo "...Asm..." VST[12]LN alias opcode (which encodes the
// data-type suffix in the opcode name) to the real instruction opcode.
// Spacing is set to the register-number increment between the vector
// registers of the list: 1 for the D-register forms, 2 for the
// Q-register ("q") forms, which use every other D register.
static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
  case ARM::VST1LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
  case ARM::VST1LNdAsm_U8:
    Spacing = 1;
    return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16:
    Spacing = 1;
    return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
    Spacing = 1;
    return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
  case ARM::VST2LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
  case ARM::VST2LNqWB_fixed_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
  case ARM::VST2LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
  case ARM::VST2LNqWB_register_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
  case ARM::VST2LNdAsm_U8:
    Spacing = 1;
    return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16:
    Spacing = 1;
    return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
    Spacing = 1;
    return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
  case ARM::VST2LNqAsm_U16:
    Spacing = 2;
    return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
    Spacing = 2;
    return ARM::VST2LNq32;
  }
}
5281
// Map a pseudo "...Asm..." VLD[12]LN alias opcode (which encodes the
// data-type suffix in the opcode name) to the real instruction opcode.
// Spacing is set to the register-number increment between the vector
// registers of the list: 1 for the D-register forms, 2 for the
// Q-register ("q") forms, which use every other D register.
static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VLD1LN
  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
  case ARM::VLD1LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
  case ARM::VLD1LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
  case ARM::VLD1LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VLD1LNd8_UPD;
  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
  case ARM::VLD1LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VLD1LNd16_UPD;
  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VLD1LNd32_UPD;
  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
  case ARM::VLD1LNdAsm_U8:
    Spacing = 1;
    return ARM::VLD1LNd8;
  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
  case ARM::VLD1LNdAsm_U16:
    Spacing = 1;
    return ARM::VLD1LNd16;
  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
    Spacing = 1;
    return ARM::VLD1LNd32;

  // VLD2LN
  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
  case ARM::VLD2LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
  case ARM::VLD2LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VLD2LNd32_UPD;
  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
  case ARM::VLD2LNqWB_fixed_Asm_U16:
    // Q-register form: spacing is 2, matching every other q case here and
    // the VST2LNqWB_fixed counterpart (was erroneously 1).
    Spacing = 2;
    return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
    Spacing = 2;
    return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
  case ARM::VLD2LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VLD2LNd8_UPD;
  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
  case ARM::VLD2LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VLD2LNd16_UPD;
  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VLD2LNd32_UPD;
  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
  case ARM::VLD2LNqWB_register_Asm_U16:
    Spacing = 2;
    return ARM::VLD2LNq16_UPD;
  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
    Spacing = 2;
    return ARM::VLD2LNq32_UPD;
  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
  case ARM::VLD2LNdAsm_U8:
    Spacing = 1;
    return ARM::VLD2LNd8;
  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
  case ARM::VLD2LNdAsm_U16:
    Spacing = 1;
    return ARM::VLD2LNd16;
  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
    Spacing = 1;
    return ARM::VLD2LNd32;
  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
  case ARM::VLD2LNqAsm_U16:
    Spacing = 2;
    return ARM::VLD2LNq16;
  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
    Spacing = 2;
    return ARM::VLD2LNq32;
  }
}
5410
5411bool ARMAsmParser::
5412processInstruction(MCInst &Inst,
5413                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5414  switch (Inst.getOpcode()) {
5415  // Aliases for alternate PC+imm syntax of LDR instructions.
5416  case ARM::t2LDRpcrel:
5417    Inst.setOpcode(ARM::t2LDRpci);
5418    return true;
5419  case ARM::t2LDRBpcrel:
5420    Inst.setOpcode(ARM::t2LDRBpci);
5421    return true;
5422  case ARM::t2LDRHpcrel:
5423    Inst.setOpcode(ARM::t2LDRHpci);
5424    return true;
5425  case ARM::t2LDRSBpcrel:
5426    Inst.setOpcode(ARM::t2LDRSBpci);
5427    return true;
5428  case ARM::t2LDRSHpcrel:
5429    Inst.setOpcode(ARM::t2LDRSHpci);
5430    return true;
5431  // Handle NEON VST complex aliases.
5432  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5433  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5434  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5435  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5436  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5437  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5438  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5439  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5440    MCInst TmpInst;
5441    // Shuffle the operands around so the lane index operand is in the
5442    // right place.
5443    unsigned Spacing;
5444    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5445    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5446    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5447    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5448    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5449    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5450    TmpInst.addOperand(Inst.getOperand(1)); // lane
5451    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5452    TmpInst.addOperand(Inst.getOperand(6));
5453    Inst = TmpInst;
5454    return true;
5455  }
5456
5457  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
5458  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
5459  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
5460  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5461  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5462  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5463  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5464  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5465  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5466  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5467  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
5468  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
5469  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
5470  case ARM::VST2LNqWB_register_Asm_U32: {
5471    MCInst TmpInst;
5472    // Shuffle the operands around so the lane index operand is in the
5473    // right place.
5474    unsigned Spacing;
5475    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5476    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5477    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5478    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5479    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5480    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5481    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5482                                            Spacing));
5483    TmpInst.addOperand(Inst.getOperand(1)); // lane
5484    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5485    TmpInst.addOperand(Inst.getOperand(6));
5486    Inst = TmpInst;
5487    return true;
5488  }
5489  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5490  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5491  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5492  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5493  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5494  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5495  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5496  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5497    MCInst TmpInst;
5498    // Shuffle the operands around so the lane index operand is in the
5499    // right place.
5500    unsigned Spacing;
5501    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5502    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5503    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5504    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5505    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5506    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5507    TmpInst.addOperand(Inst.getOperand(1)); // lane
5508    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5509    TmpInst.addOperand(Inst.getOperand(5));
5510    Inst = TmpInst;
5511    return true;
5512  }
5513
5514  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
5515  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
5516  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
5517  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5518  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5519  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5520  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5521  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5522  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5523  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5524  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
5525  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
5526  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
5527  case ARM::VST2LNqWB_fixed_Asm_U32: {
5528    MCInst TmpInst;
5529    // Shuffle the operands around so the lane index operand is in the
5530    // right place.
5531    unsigned Spacing;
5532    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5533    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5534    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5535    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5536    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5537    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5538    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5539                                            Spacing));
5540    TmpInst.addOperand(Inst.getOperand(1)); // lane
5541    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5542    TmpInst.addOperand(Inst.getOperand(5));
5543    Inst = TmpInst;
5544    return true;
5545  }
5546  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5547  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5548  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5549  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5550  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5551  case ARM::VST1LNdAsm_U32: {
5552    MCInst TmpInst;
5553    // Shuffle the operands around so the lane index operand is in the
5554    // right place.
5555    unsigned Spacing;
5556    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5557    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5558    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5559    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5560    TmpInst.addOperand(Inst.getOperand(1)); // lane
5561    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5562    TmpInst.addOperand(Inst.getOperand(5));
5563    Inst = TmpInst;
5564    return true;
5565  }
5566
5567  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
5568  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
5569  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5570  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5571  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5572  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5573  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
5574  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
5575  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
5576    MCInst TmpInst;
5577    // Shuffle the operands around so the lane index operand is in the
5578    // right place.
5579    unsigned Spacing;
5580    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5581    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5582    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5583    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5584    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5585                                            Spacing));
5586    TmpInst.addOperand(Inst.getOperand(1)); // lane
5587    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5588    TmpInst.addOperand(Inst.getOperand(5));
5589    Inst = TmpInst;
5590    return true;
5591  }
5592  // Handle NEON VLD complex aliases.
5593  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5594  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5595  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5596  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5597  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5598  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5599  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5600  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5601    MCInst TmpInst;
5602    // Shuffle the operands around so the lane index operand is in the
5603    // right place.
5604    unsigned Spacing;
5605    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5606    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5607    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5608    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5609    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5610    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5611    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5612    TmpInst.addOperand(Inst.getOperand(1)); // lane
5613    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5614    TmpInst.addOperand(Inst.getOperand(6));
5615    Inst = TmpInst;
5616    return true;
5617  }
5618
5619  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
5620  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
5621  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
5622  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5623  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5624  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5625  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5626  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5627  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5628  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5629  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
5630  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
5631  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
5632  case ARM::VLD2LNqWB_register_Asm_U32: {
5633    MCInst TmpInst;
5634    // Shuffle the operands around so the lane index operand is in the
5635    // right place.
5636    unsigned Spacing;
5637    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5638    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5639    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5640                                            Spacing));
5641    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5642    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5643    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5644    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5645    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5646    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5647                                            Spacing));
5648    TmpInst.addOperand(Inst.getOperand(1)); // lane
5649    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5650    TmpInst.addOperand(Inst.getOperand(6));
5651    Inst = TmpInst;
5652    return true;
5653  }
5654
5655  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5656  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5657  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5658  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5659  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5660  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5661  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5662  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5663    MCInst TmpInst;
5664    // Shuffle the operands around so the lane index operand is in the
5665    // right place.
5666    unsigned Spacing;
5667    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5668    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5669    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5670    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5671    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5672    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5673    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5674    TmpInst.addOperand(Inst.getOperand(1)); // lane
5675    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5676    TmpInst.addOperand(Inst.getOperand(5));
5677    Inst = TmpInst;
5678    return true;
5679  }
5680
5681  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
5682  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
5683  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
5684  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5685  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5686  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5687  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5688  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5689  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5690  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5691  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
5692  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
5693  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
5694  case ARM::VLD2LNqWB_fixed_Asm_U32: {
5695    MCInst TmpInst;
5696    // Shuffle the operands around so the lane index operand is in the
5697    // right place.
5698    unsigned Spacing;
5699    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5700    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5701    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5702                                            Spacing));
5703    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5704    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5705    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5706    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5707    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5708    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5709                                            Spacing));
5710    TmpInst.addOperand(Inst.getOperand(1)); // lane
5711    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5712    TmpInst.addOperand(Inst.getOperand(5));
5713    Inst = TmpInst;
5714    return true;
5715  }
5716
5717  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
5718  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
5719  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5720  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5721  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5722  case ARM::VLD1LNdAsm_U32: {
5723    MCInst TmpInst;
5724    // Shuffle the operands around so the lane index operand is in the
5725    // right place.
5726    unsigned Spacing;
5727    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5728    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5729    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5730    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5731    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5732    TmpInst.addOperand(Inst.getOperand(1)); // lane
5733    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5734    TmpInst.addOperand(Inst.getOperand(5));
5735    Inst = TmpInst;
5736    return true;
5737  }
5738
5739  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
5740  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
5741  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5742  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5743  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5744  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5745  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
5746  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
5747  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
5748  case ARM::VLD2LNqAsm_U32: {
5749    MCInst TmpInst;
5750    // Shuffle the operands around so the lane index operand is in the
5751    // right place.
5752    unsigned Spacing;
5753    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5754    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5755    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5756                                            Spacing));
5757    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5758    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5759    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5760    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5761                                            Spacing));
5762    TmpInst.addOperand(Inst.getOperand(1)); // lane
5763    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5764    TmpInst.addOperand(Inst.getOperand(5));
5765    Inst = TmpInst;
5766    return true;
5767  }
5768  // Handle the Thumb2 mode MOV complex aliases.
5769  case ARM::t2MOVsr:
5770  case ARM::t2MOVSsr: {
5771    // Which instruction to expand to depends on the CCOut operand and
5772    // whether we're in an IT block if the register operands are low
5773    // registers.
5774    bool isNarrow = false;
5775    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5776        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5777        isARMLowRegister(Inst.getOperand(2).getReg()) &&
5778        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
5779        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
5780      isNarrow = true;
5781    MCInst TmpInst;
5782    unsigned newOpc;
5783    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
5784    default: llvm_unreachable("unexpected opcode!");
5785    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
5786    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
5787    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
5788    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
5789    }
5790    TmpInst.setOpcode(newOpc);
5791    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5792    if (isNarrow)
5793      TmpInst.addOperand(MCOperand::CreateReg(
5794          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5795    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5796    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5797    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5798    TmpInst.addOperand(Inst.getOperand(5));
5799    if (!isNarrow)
5800      TmpInst.addOperand(MCOperand::CreateReg(
5801          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5802    Inst = TmpInst;
5803    return true;
5804  }
5805  case ARM::t2MOVsi:
5806  case ARM::t2MOVSsi: {
5807    // Which instruction to expand to depends on the CCOut operand and
5808    // whether we're in an IT block if the register operands are low
5809    // registers.
5810    bool isNarrow = false;
5811    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5812        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5813        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5814      isNarrow = true;
5815    MCInst TmpInst;
5816    unsigned newOpc;
5817    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5818    default: llvm_unreachable("unexpected opcode!");
5819    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5820    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5821    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5822    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5823    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
5824    }
5825    unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5826    if (Ammount == 32) Ammount = 0;
5827    TmpInst.setOpcode(newOpc);
5828    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5829    if (isNarrow)
5830      TmpInst.addOperand(MCOperand::CreateReg(
5831          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5832    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5833    if (newOpc != ARM::t2RRX)
5834      TmpInst.addOperand(MCOperand::CreateImm(Ammount));
5835    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5836    TmpInst.addOperand(Inst.getOperand(4));
5837    if (!isNarrow)
5838      TmpInst.addOperand(MCOperand::CreateReg(
5839          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5840    Inst = TmpInst;
5841    return true;
5842  }
5843  // Handle the ARM mode MOV complex aliases.
5844  case ARM::ASRr:
5845  case ARM::LSRr:
5846  case ARM::LSLr:
5847  case ARM::RORr: {
5848    ARM_AM::ShiftOpc ShiftTy;
5849    switch(Inst.getOpcode()) {
5850    default: llvm_unreachable("unexpected opcode!");
5851    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5852    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5853    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5854    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5855    }
5856    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5857    MCInst TmpInst;
5858    TmpInst.setOpcode(ARM::MOVsr);
5859    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5860    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5861    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5862    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5863    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5864    TmpInst.addOperand(Inst.getOperand(4));
5865    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5866    Inst = TmpInst;
5867    return true;
5868  }
5869  case ARM::ASRi:
5870  case ARM::LSRi:
5871  case ARM::LSLi:
5872  case ARM::RORi: {
5873    ARM_AM::ShiftOpc ShiftTy;
5874    switch(Inst.getOpcode()) {
5875    default: llvm_unreachable("unexpected opcode!");
5876    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5877    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5878    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5879    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5880    }
5881    // A shift by zero is a plain MOVr, not a MOVsi.
5882    unsigned Amt = Inst.getOperand(2).getImm();
5883    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5884    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5885    MCInst TmpInst;
5886    TmpInst.setOpcode(Opc);
5887    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5888    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5889    if (Opc == ARM::MOVsi)
5890      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5891    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5892    TmpInst.addOperand(Inst.getOperand(4));
5893    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5894    Inst = TmpInst;
5895    return true;
5896  }
5897  case ARM::RRXi: {
5898    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5899    MCInst TmpInst;
5900    TmpInst.setOpcode(ARM::MOVsi);
5901    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5902    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5903    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5904    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5905    TmpInst.addOperand(Inst.getOperand(3));
5906    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5907    Inst = TmpInst;
5908    return true;
5909  }
5910  case ARM::t2LDMIA_UPD: {
5911    // If this is a load of a single register, then we should use
5912    // a post-indexed LDR instruction instead, per the ARM ARM.
5913    if (Inst.getNumOperands() != 5)
5914      return false;
5915    MCInst TmpInst;
5916    TmpInst.setOpcode(ARM::t2LDR_POST);
5917    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5918    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5919    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5920    TmpInst.addOperand(MCOperand::CreateImm(4));
5921    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5922    TmpInst.addOperand(Inst.getOperand(3));
5923    Inst = TmpInst;
5924    return true;
5925  }
5926  case ARM::t2STMDB_UPD: {
5927    // If this is a store of a single register, then we should use
5928    // a pre-indexed STR instruction instead, per the ARM ARM.
5929    if (Inst.getNumOperands() != 5)
5930      return false;
5931    MCInst TmpInst;
5932    TmpInst.setOpcode(ARM::t2STR_PRE);
5933    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5934    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5935    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5936    TmpInst.addOperand(MCOperand::CreateImm(-4));
5937    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5938    TmpInst.addOperand(Inst.getOperand(3));
5939    Inst = TmpInst;
5940    return true;
5941  }
5942  case ARM::LDMIA_UPD:
5943    // If this is a load of a single register via a 'pop', then we should use
5944    // a post-indexed LDR instruction instead, per the ARM ARM.
5945    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5946        Inst.getNumOperands() == 5) {
5947      MCInst TmpInst;
5948      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5949      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5950      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5951      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5952      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5953      TmpInst.addOperand(MCOperand::CreateImm(4));
5954      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5955      TmpInst.addOperand(Inst.getOperand(3));
5956      Inst = TmpInst;
5957      return true;
5958    }
5959    break;
5960  case ARM::STMDB_UPD:
5961    // If this is a store of a single register via a 'push', then we should use
5962    // a pre-indexed STR instruction instead, per the ARM ARM.
5963    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5964        Inst.getNumOperands() == 5) {
5965      MCInst TmpInst;
5966      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5967      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5968      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5969      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5970      TmpInst.addOperand(MCOperand::CreateImm(-4));
5971      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5972      TmpInst.addOperand(Inst.getOperand(3));
5973      Inst = TmpInst;
5974    }
5975    break;
5976  case ARM::t2ADDri12:
5977    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5978    // mnemonic was used (not "addw"), encoding T3 is preferred.
5979    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5980        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5981      break;
5982    Inst.setOpcode(ARM::t2ADDri);
5983    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5984    break;
5985  case ARM::t2SUBri12:
5986    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5987    // mnemonic was used (not "subw"), encoding T3 is preferred.
5988    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5989        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5990      break;
5991    Inst.setOpcode(ARM::t2SUBri);
5992    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5993    break;
5994  case ARM::tADDi8:
5995    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5996    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5997    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5998    // to encoding T1 if <Rd> is omitted."
5999    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6000      Inst.setOpcode(ARM::tADDi3);
6001      return true;
6002    }
6003    break;
6004  case ARM::tSUBi8:
6005    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6006    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6007    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6008    // to encoding T1 if <Rd> is omitted."
6009    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6010      Inst.setOpcode(ARM::tSUBi3);
6011      return true;
6012    }
6013    break;
6014  case ARM::t2ADDrr: {
6015    // If the destination and first source operand are the same, and
6016    // there's no setting of the flags, use encoding T2 instead of T3.
6017    // Note that this is only for ADD, not SUB. This mirrors the system
6018    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
6019    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6020        Inst.getOperand(5).getReg() != 0 ||
6021        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6022         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6023      break;
6024    MCInst TmpInst;
6025    TmpInst.setOpcode(ARM::tADDhirr);
6026    TmpInst.addOperand(Inst.getOperand(0));
6027    TmpInst.addOperand(Inst.getOperand(0));
6028    TmpInst.addOperand(Inst.getOperand(2));
6029    TmpInst.addOperand(Inst.getOperand(3));
6030    TmpInst.addOperand(Inst.getOperand(4));
6031    Inst = TmpInst;
6032    return true;
6033  }
6034  case ARM::tB:
6035    // A Thumb conditional branch outside of an IT block is a tBcc.
6036    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6037      Inst.setOpcode(ARM::tBcc);
6038      return true;
6039    }
6040    break;
6041  case ARM::t2B:
6042    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6043    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6044      Inst.setOpcode(ARM::t2Bcc);
6045      return true;
6046    }
6047    break;
6048  case ARM::t2Bcc:
6049    // If the conditional is AL or we're in an IT block, we really want t2B.
6050    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6051      Inst.setOpcode(ARM::t2B);
6052      return true;
6053    }
6054    break;
6055  case ARM::tBcc:
6056    // If the conditional is AL, we really want tB.
6057    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6058      Inst.setOpcode(ARM::tB);
6059      return true;
6060    }
6061    break;
6062  case ARM::tLDMIA: {
6063    // If the register list contains any high registers, or if the writeback
6064    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6065    // instead if we're in Thumb2. Otherwise, this should have generated
6066    // an error in validateInstruction().
6067    unsigned Rn = Inst.getOperand(0).getReg();
6068    bool hasWritebackToken =
6069      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6070       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6071    bool listContainsBase;
6072    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6073        (!listContainsBase && !hasWritebackToken) ||
6074        (listContainsBase && hasWritebackToken)) {
6075      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6076      assert (isThumbTwo());
6077      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6078      // If we're switching to the updating version, we need to insert
6079      // the writeback tied operand.
6080      if (hasWritebackToken)
6081        Inst.insert(Inst.begin(),
6082                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6083      return true;
6084    }
6085    break;
6086  }
6087  case ARM::tSTMIA_UPD: {
6088    // If the register list contains any high registers, we need to use
6089    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6090    // should have generated an error in validateInstruction().
6091    unsigned Rn = Inst.getOperand(0).getReg();
6092    bool listContainsBase;
6093    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6094      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6095      assert (isThumbTwo());
6096      Inst.setOpcode(ARM::t2STMIA_UPD);
6097      return true;
6098    }
6099    break;
6100  }
6101  case ARM::tPOP: {
6102    bool listContainsBase;
6103    // If the register list contains any high registers, we need to use
6104    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6105    // should have generated an error in validateInstruction().
6106    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6107      return false;
6108    assert (isThumbTwo());
6109    Inst.setOpcode(ARM::t2LDMIA_UPD);
6110    // Add the base register and writeback operands.
6111    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6112    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6113    return true;
6114  }
6115  case ARM::tPUSH: {
6116    bool listContainsBase;
6117    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6118      return false;
6119    assert (isThumbTwo());
6120    Inst.setOpcode(ARM::t2STMDB_UPD);
6121    // Add the base register and writeback operands.
6122    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6123    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6124    return true;
6125  }
6126  case ARM::t2MOVi: {
6127    // If we can use the 16-bit encoding and the user didn't explicitly
6128    // request the 32-bit variant, transform it here.
6129    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6130        Inst.getOperand(1).getImm() <= 255 &&
6131        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6132         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6133        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6134        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6135         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6136      // The operands aren't in the same order for tMOVi8...
6137      MCInst TmpInst;
6138      TmpInst.setOpcode(ARM::tMOVi8);
6139      TmpInst.addOperand(Inst.getOperand(0));
6140      TmpInst.addOperand(Inst.getOperand(4));
6141      TmpInst.addOperand(Inst.getOperand(1));
6142      TmpInst.addOperand(Inst.getOperand(2));
6143      TmpInst.addOperand(Inst.getOperand(3));
6144      Inst = TmpInst;
6145      return true;
6146    }
6147    break;
6148  }
6149  case ARM::t2MOVr: {
6150    // If we can use the 16-bit encoding and the user didn't explicitly
6151    // request the 32-bit variant, transform it here.
6152    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6153        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6154        Inst.getOperand(2).getImm() == ARMCC::AL &&
6155        Inst.getOperand(4).getReg() == ARM::CPSR &&
6156        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6157         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6158      // The operands aren't the same for tMOV[S]r... (no cc_out)
6159      MCInst TmpInst;
6160      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6161      TmpInst.addOperand(Inst.getOperand(0));
6162      TmpInst.addOperand(Inst.getOperand(1));
6163      TmpInst.addOperand(Inst.getOperand(2));
6164      TmpInst.addOperand(Inst.getOperand(3));
6165      Inst = TmpInst;
6166      return true;
6167    }
6168    break;
6169  }
6170  case ARM::t2SXTH:
6171  case ARM::t2SXTB:
6172  case ARM::t2UXTH:
6173  case ARM::t2UXTB: {
6174    // If we can use the 16-bit encoding and the user didn't explicitly
6175    // request the 32-bit variant, transform it here.
6176    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6177        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6178        Inst.getOperand(2).getImm() == 0 &&
6179        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6180         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6181      unsigned NewOpc;
6182      switch (Inst.getOpcode()) {
6183      default: llvm_unreachable("Illegal opcode!");
6184      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6185      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6186      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6187      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6188      }
6189      // The operands aren't the same for thumb1 (no rotate operand).
6190      MCInst TmpInst;
6191      TmpInst.setOpcode(NewOpc);
6192      TmpInst.addOperand(Inst.getOperand(0));
6193      TmpInst.addOperand(Inst.getOperand(1));
6194      TmpInst.addOperand(Inst.getOperand(3));
6195      TmpInst.addOperand(Inst.getOperand(4));
6196      Inst = TmpInst;
6197      return true;
6198    }
6199    break;
6200  }
6201  case ARM::MOVsi: {
6202    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6203    if (SOpc == ARM_AM::rrx) return false;
6204    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6205      // Shifting by zero is accepted as a vanilla 'MOVr'
6206      MCInst TmpInst;
6207      TmpInst.setOpcode(ARM::MOVr);
6208      TmpInst.addOperand(Inst.getOperand(0));
6209      TmpInst.addOperand(Inst.getOperand(1));
6210      TmpInst.addOperand(Inst.getOperand(3));
6211      TmpInst.addOperand(Inst.getOperand(4));
6212      TmpInst.addOperand(Inst.getOperand(5));
6213      Inst = TmpInst;
6214      return true;
6215    }
6216    return false;
6217  }
6218  case ARM::ANDrsi:
6219  case ARM::ORRrsi:
6220  case ARM::EORrsi:
6221  case ARM::BICrsi:
6222  case ARM::SUBrsi:
6223  case ARM::ADDrsi: {
6224    unsigned newOpc;
6225    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
6226    if (SOpc == ARM_AM::rrx) return false;
6227    switch (Inst.getOpcode()) {
6228    default: assert(0 && "unexpected opcode!");
6229    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
6230    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
6231    case ARM::EORrsi: newOpc = ARM::EORrr; break;
6232    case ARM::BICrsi: newOpc = ARM::BICrr; break;
6233    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
6234    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
6235    }
6236    // If the shift is by zero, use the non-shifted instruction definition.
6237    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
6238      MCInst TmpInst;
6239      TmpInst.setOpcode(newOpc);
6240      TmpInst.addOperand(Inst.getOperand(0));
6241      TmpInst.addOperand(Inst.getOperand(1));
6242      TmpInst.addOperand(Inst.getOperand(2));
6243      TmpInst.addOperand(Inst.getOperand(4));
6244      TmpInst.addOperand(Inst.getOperand(5));
6245      TmpInst.addOperand(Inst.getOperand(6));
6246      Inst = TmpInst;
6247      return true;
6248    }
6249    return false;
6250  }
6251  case ARM::t2IT: {
6252    // The mask bits for all but the first condition are represented as
6253    // the low bit of the condition code value implies 't'. We currently
6254    // always have 1 implies 't', so XOR toggle the bits if the low bit
6255    // of the condition code is zero. The encoding also expects the low
6256    // bit of the condition to be encoded as bit 4 of the mask operand,
6257    // so mask that in if needed
6258    MCOperand &MO = Inst.getOperand(1);
6259    unsigned Mask = MO.getImm();
6260    unsigned OrigMask = Mask;
6261    unsigned TZ = CountTrailingZeros_32(Mask);
6262    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6263      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6264      for (unsigned i = 3; i != TZ; --i)
6265        Mask ^= 1 << i;
6266    } else
6267      Mask |= 0x10;
6268    MO.setImm(Mask);
6269
6270    // Set up the IT block state according to the IT instruction we just
6271    // matched.
6272    assert(!inITBlock() && "nested IT blocks?!");
6273    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6274    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6275    ITState.CurPosition = 0;
6276    ITState.FirstCond = true;
6277    break;
6278  }
6279  }
6280  return false;
6281}
6282
6283unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6284  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6285  // suffix depending on whether they're in an IT block or not.
6286  unsigned Opc = Inst.getOpcode();
6287  const MCInstrDesc &MCID = getInstDesc(Opc);
6288  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6289    assert(MCID.hasOptionalDef() &&
6290           "optionally flag setting instruction missing optional def operand");
6291    assert(MCID.NumOperands == Inst.getNumOperands() &&
6292           "operand count mismatch!");
6293    // Find the optional-def operand (cc_out).
6294    unsigned OpNo;
6295    for (OpNo = 0;
6296         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
6297         ++OpNo)
6298      ;
6299    // If we're parsing Thumb1, reject it completely.
6300    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6301      return Match_MnemonicFail;
6302    // If we're parsing Thumb2, which form is legal depends on whether we're
6303    // in an IT block.
6304    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6305        !inITBlock())
6306      return Match_RequiresITBlock;
6307    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6308        inITBlock())
6309      return Match_RequiresNotITBlock;
6310  }
6311  // Some high-register supporting Thumb1 encodings only allow both registers
6312  // to be from r0-r7 when in Thumb2.
6313  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6314           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6315           isARMLowRegister(Inst.getOperand(2).getReg()))
6316    return Match_RequiresThumb2;
6317  // Others only require ARMv6 or later.
6318  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6319           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6320           isARMLowRegister(Inst.getOperand(1).getReg()))
6321    return Match_RequiresV6;
6322  return Match_Success;
6323}
6324
/// MatchAndEmitInstruction - Run the generated matcher on the parsed
/// operands, then apply target-specific validation and encoding selection
/// before emitting the instruction to the streamer.
/// Returns true on error (a diagnostic has been emitted), false on success.
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                        MCStreamer &Out) {
  MCInst Inst;
  unsigned ErrorInfo;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
  switch (MatchResult) {
  default: break;
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other. E.g.,
    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
    while (processInstruction(Inst, Operands))
      ;

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    // When the matcher identified the offending operand, ErrorInfo is its
    // index into Operands; ~0U means "unknown", so fall back to IDLoc.
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}
6392
6393/// parseDirective parses the arm specific directives
6394bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6395  StringRef IDVal = DirectiveID.getIdentifier();
6396  if (IDVal == ".word")
6397    return parseDirectiveWord(4, DirectiveID.getLoc());
6398  else if (IDVal == ".thumb")
6399    return parseDirectiveThumb(DirectiveID.getLoc());
6400  else if (IDVal == ".arm")
6401    return parseDirectiveARM(DirectiveID.getLoc());
6402  else if (IDVal == ".thumb_func")
6403    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6404  else if (IDVal == ".code")
6405    return parseDirectiveCode(DirectiveID.getLoc());
6406  else if (IDVal == ".syntax")
6407    return parseDirectiveSyntax(DirectiveID.getLoc());
6408  else if (IDVal == ".unreq")
6409    return parseDirectiveUnreq(DirectiveID.getLoc());
6410  else if (IDVal == ".arch")
6411    return parseDirectiveArch(DirectiveID.getLoc());
6412  else if (IDVal == ".eabi_attribute")
6413    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6414  return true;
6415}
6416
6417/// parseDirectiveWord
6418///  ::= .word [ expression (, expression)* ]
6419bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6420  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6421    for (;;) {
6422      const MCExpr *Value;
6423      if (getParser().ParseExpression(Value))
6424        return true;
6425
6426      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6427
6428      if (getLexer().is(AsmToken::EndOfStatement))
6429        break;
6430
6431      // FIXME: Improve diagnostic.
6432      if (getLexer().isNot(AsmToken::Comma))
6433        return Error(L, "unexpected token in directive");
6434      Parser.Lex();
6435    }
6436  }
6437
6438  Parser.Lex();
6439  return false;
6440}
6441
6442/// parseDirectiveThumb
6443///  ::= .thumb
6444bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6445  if (getLexer().isNot(AsmToken::EndOfStatement))
6446    return Error(L, "unexpected token in directive");
6447  Parser.Lex();
6448
6449  if (!isThumb())
6450    SwitchMode();
6451  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6452  return false;
6453}
6454
6455/// parseDirectiveARM
6456///  ::= .arm
6457bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6458  if (getLexer().isNot(AsmToken::EndOfStatement))
6459    return Error(L, "unexpected token in directive");
6460  Parser.Lex();
6461
6462  if (isThumb())
6463    SwitchMode();
6464  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6465  return false;
6466}
6467
/// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
/// Marks a symbol as a Thumb-mode function entry point.
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;
  bool needFuncName = true;

  // Darwin asm has (optionally) function name after .thumb_func directive;
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::EndOfStatement)) {
      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
        return Error(L, "unexpected token in .thumb_func directive");
      Name = Tok.getIdentifier();
      Parser.Lex(); // Consume the identifier token.
      needFuncName = false;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");

  // Eat the end of statement and any blank lines that follow.
  while (getLexer().is(AsmToken::EndOfStatement))
    Parser.Lex();

  // FIXME: assuming function name will be the line following .thumb_func
  // We really should be checking the next symbol definition even if there's
  // stuff in between.
  if (needFuncName) {
    // NOTE(review): this assumes the next non-blank line starts with an
    // identifier token; no diagnostic is issued if it doesn't — TODO confirm
    // whether getIdentifier() is safe on a non-identifier token here.
    Name = Parser.getTok().getIdentifier();
  }

  // Mark symbol as a thumb symbol.
  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
  getParser().getStreamer().EmitThumbFunc(Func);
  return false;
}
6508
6509/// parseDirectiveSyntax
6510///  ::= .syntax unified | divided
6511bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6512  const AsmToken &Tok = Parser.getTok();
6513  if (Tok.isNot(AsmToken::Identifier))
6514    return Error(L, "unexpected token in .syntax directive");
6515  StringRef Mode = Tok.getString();
6516  if (Mode == "unified" || Mode == "UNIFIED")
6517    Parser.Lex();
6518  else if (Mode == "divided" || Mode == "DIVIDED")
6519    return Error(L, "'.syntax divided' arm asssembly not supported");
6520  else
6521    return Error(L, "unrecognized syntax mode in .syntax directive");
6522
6523  if (getLexer().isNot(AsmToken::EndOfStatement))
6524    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6525  Parser.Lex();
6526
6527  // TODO tell the MC streamer the mode
6528  // getParser().getStreamer().Emit???();
6529  return false;
6530}
6531
6532/// parseDirectiveCode
6533///  ::= .code 16 | 32
6534bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6535  const AsmToken &Tok = Parser.getTok();
6536  if (Tok.isNot(AsmToken::Integer))
6537    return Error(L, "unexpected token in .code directive");
6538  int64_t Val = Parser.getTok().getIntVal();
6539  if (Val == 16)
6540    Parser.Lex();
6541  else if (Val == 32)
6542    Parser.Lex();
6543  else
6544    return Error(L, "invalid operand to .code directive");
6545
6546  if (getLexer().isNot(AsmToken::EndOfStatement))
6547    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6548  Parser.Lex();
6549
6550  if (Val == 16) {
6551    if (!isThumb())
6552      SwitchMode();
6553    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6554  } else {
6555    if (isThumb())
6556      SwitchMode();
6557    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6558  }
6559
6560  return false;
6561}
6562
6563/// parseDirectiveReq
6564///  ::= name .req registername
6565bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6566  Parser.Lex(); // Eat the '.req' token.
6567  unsigned Reg;
6568  SMLoc SRegLoc, ERegLoc;
6569  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6570    Parser.EatToEndOfStatement();
6571    return Error(SRegLoc, "register name expected");
6572  }
6573
6574  // Shouldn't be anything else.
6575  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6576    Parser.EatToEndOfStatement();
6577    return Error(Parser.getTok().getLoc(),
6578                 "unexpected input in .req directive.");
6579  }
6580
6581  Parser.Lex(); // Consume the EndOfStatement
6582
6583  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6584    return Error(SRegLoc, "redefinition of '" + Name +
6585                          "' does not match original.");
6586
6587  return false;
6588}
6589
6590/// parseDirectiveUneq
6591///  ::= .unreq registername
6592bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6593  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6594    Parser.EatToEndOfStatement();
6595    return Error(L, "unexpected input in .unreq directive.");
6596  }
6597  RegisterReqs.erase(Parser.getTok().getIdentifier());
6598  Parser.Lex(); // Eat the identifier.
6599  return false;
6600}
6601
/// parseDirectiveArch
///  ::= .arch token
/// TODO: not yet implemented; returning true reports the directive as
/// unhandled.
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
  return true;
}
6607
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int
/// TODO: not yet implemented; returning true reports the directive as
/// unhandled.
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  return true;
}
6613
6614extern "C" void LLVMInitializeARMAsmLexer();
6615
/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
  // Register this asm parser for both the ARM and Thumb targets.
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}
6622
6623#define GET_REGISTER_MATCHER
6624#define GET_MATCHER_IMPLEMENTATION
6625#include "ARMGenAsmMatcher.inc"
6626