// ARMAsmParser.cpp revision ae69f703d59410fc96f04be3c1afeaa1c17a45ce
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registers via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // First instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an ITBlock to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_FPImmediate,
274    k_MemBarrierOpt,
275    k_Memory,
276    k_PostIndexRegister,
277    k_MSRMask,
278    k_ProcIFlags,
279    k_VectorIndex,
280    k_Register,
281    k_RegisterList,
282    k_DPRRegisterList,
283    k_SPRRegisterList,
284    k_VectorList,
285    k_VectorListAllLanes,
286    k_VectorListIndexed,
287    k_ShiftedRegister,
288    k_ShiftedImmediate,
289    k_ShifterImmediate,
290    k_RotateImmediate,
291    k_BitfieldDescriptor,
292    k_Token
293  } Kind;
294
295  SMLoc StartLoc, EndLoc;
296  SmallVector<unsigned, 8> Registers;
297
298  union {
299    struct {
300      ARMCC::CondCodes Val;
301    } CC;
302
303    struct {
304      unsigned Val;
305    } Cop;
306
307    struct {
308      unsigned Val;
309    } CoprocOption;
310
311    struct {
312      unsigned Mask:4;
313    } ITMask;
314
315    struct {
316      ARM_MB::MemBOpt Val;
317    } MBOpt;
318
319    struct {
320      ARM_PROC::IFlags Val;
321    } IFlags;
322
323    struct {
324      unsigned Val;
325    } MMask;
326
327    struct {
328      const char *Data;
329      unsigned Length;
330    } Tok;
331
332    struct {
333      unsigned RegNum;
334    } Reg;
335
336    // A vector register list is a sequential list of 1 to 4 registers.
337    struct {
338      unsigned RegNum;
339      unsigned Count;
340      unsigned LaneIndex;
341      bool isDoubleSpaced;
342    } VectorList;
343
344    struct {
345      unsigned Val;
346    } VectorIndex;
347
348    struct {
349      const MCExpr *Val;
350    } Imm;
351
352    struct {
353      unsigned Val;       // encoded 8-bit representation
354    } FPImm;
355
356    /// Combined record for all forms of ARM address expressions.
357    struct {
358      unsigned BaseRegNum;
359      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
360      // was specified.
361      const MCConstantExpr *OffsetImm;  // Offset immediate value
362      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
363      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
364      unsigned ShiftImm;        // shift for OffsetReg.
365      unsigned Alignment;       // 0 = no alignment specified
366                                // n = alignment in bytes (2, 4, 8, 16, or 32)
367      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
368    } Memory;
369
370    struct {
371      unsigned RegNum;
372      bool isAdd;
373      ARM_AM::ShiftOpc ShiftTy;
374      unsigned ShiftImm;
375    } PostIdxReg;
376
377    struct {
378      bool isASR;
379      unsigned Imm;
380    } ShifterImm;
381    struct {
382      ARM_AM::ShiftOpc ShiftTy;
383      unsigned SrcReg;
384      unsigned ShiftReg;
385      unsigned ShiftImm;
386    } RegShiftedReg;
387    struct {
388      ARM_AM::ShiftOpc ShiftTy;
389      unsigned SrcReg;
390      unsigned ShiftImm;
391    } RegShiftedImm;
392    struct {
393      unsigned Imm;
394    } RotImm;
395    struct {
396      unsigned LSB;
397      unsigned Width;
398    } Bitfield;
399  };
400
401  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
402public:
403  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
404    Kind = o.Kind;
405    StartLoc = o.StartLoc;
406    EndLoc = o.EndLoc;
407    switch (Kind) {
408    case k_CondCode:
409      CC = o.CC;
410      break;
411    case k_ITCondMask:
412      ITMask = o.ITMask;
413      break;
414    case k_Token:
415      Tok = o.Tok;
416      break;
417    case k_CCOut:
418    case k_Register:
419      Reg = o.Reg;
420      break;
421    case k_RegisterList:
422    case k_DPRRegisterList:
423    case k_SPRRegisterList:
424      Registers = o.Registers;
425      break;
426    case k_VectorList:
427    case k_VectorListAllLanes:
428    case k_VectorListIndexed:
429      VectorList = o.VectorList;
430      break;
431    case k_CoprocNum:
432    case k_CoprocReg:
433      Cop = o.Cop;
434      break;
435    case k_CoprocOption:
436      CoprocOption = o.CoprocOption;
437      break;
438    case k_Immediate:
439      Imm = o.Imm;
440      break;
441    case k_FPImmediate:
442      FPImm = o.FPImm;
443      break;
444    case k_MemBarrierOpt:
445      MBOpt = o.MBOpt;
446      break;
447    case k_Memory:
448      Memory = o.Memory;
449      break;
450    case k_PostIndexRegister:
451      PostIdxReg = o.PostIdxReg;
452      break;
453    case k_MSRMask:
454      MMask = o.MMask;
455      break;
456    case k_ProcIFlags:
457      IFlags = o.IFlags;
458      break;
459    case k_ShifterImmediate:
460      ShifterImm = o.ShifterImm;
461      break;
462    case k_ShiftedRegister:
463      RegShiftedReg = o.RegShiftedReg;
464      break;
465    case k_ShiftedImmediate:
466      RegShiftedImm = o.RegShiftedImm;
467      break;
468    case k_RotateImmediate:
469      RotImm = o.RotImm;
470      break;
471    case k_BitfieldDescriptor:
472      Bitfield = o.Bitfield;
473      break;
474    case k_VectorIndex:
475      VectorIndex = o.VectorIndex;
476      break;
477    }
478  }
479
480  /// getStartLoc - Get the location of the first token of this operand.
481  SMLoc getStartLoc() const { return StartLoc; }
482  /// getEndLoc - Get the location of the last token of this operand.
483  SMLoc getEndLoc() const { return EndLoc; }
484
485  ARMCC::CondCodes getCondCode() const {
486    assert(Kind == k_CondCode && "Invalid access!");
487    return CC.Val;
488  }
489
490  unsigned getCoproc() const {
491    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
492    return Cop.Val;
493  }
494
495  StringRef getToken() const {
496    assert(Kind == k_Token && "Invalid access!");
497    return StringRef(Tok.Data, Tok.Length);
498  }
499
500  unsigned getReg() const {
501    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
502    return Reg.RegNum;
503  }
504
505  const SmallVectorImpl<unsigned> &getRegList() const {
506    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
507            Kind == k_SPRRegisterList) && "Invalid access!");
508    return Registers;
509  }
510
511  const MCExpr *getImm() const {
512    assert(isImm() && "Invalid access!");
513    return Imm.Val;
514  }
515
516  unsigned getFPImm() const {
517    assert(Kind == k_FPImmediate && "Invalid access!");
518    return FPImm.Val;
519  }
520
521  unsigned getVectorIndex() const {
522    assert(Kind == k_VectorIndex && "Invalid access!");
523    return VectorIndex.Val;
524  }
525
526  ARM_MB::MemBOpt getMemBarrierOpt() const {
527    assert(Kind == k_MemBarrierOpt && "Invalid access!");
528    return MBOpt.Val;
529  }
530
531  ARM_PROC::IFlags getProcIFlags() const {
532    assert(Kind == k_ProcIFlags && "Invalid access!");
533    return IFlags.Val;
534  }
535
536  unsigned getMSRMask() const {
537    assert(Kind == k_MSRMask && "Invalid access!");
538    return MMask.Val;
539  }
540
541  bool isCoprocNum() const { return Kind == k_CoprocNum; }
542  bool isCoprocReg() const { return Kind == k_CoprocReg; }
543  bool isCoprocOption() const { return Kind == k_CoprocOption; }
544  bool isCondCode() const { return Kind == k_CondCode; }
545  bool isCCOut() const { return Kind == k_CCOut; }
546  bool isITMask() const { return Kind == k_ITCondMask; }
547  bool isITCondCode() const { return Kind == k_CondCode; }
548  bool isImm() const { return Kind == k_Immediate; }
549  bool isFPImm() const { return Kind == k_FPImmediate; }
550  bool isFBits16() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 0 && Value <= 16;
556  }
557  bool isFBits32() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return Value >= 1 && Value <= 32;
563  }
564  bool isImm8s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
570  }
571  bool isImm0_1020s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
577  }
578  bool isImm0_508s4() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
584  }
585  bool isImm0_255() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 256;
591  }
592  bool isImm0_1() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 2;
598  }
599  bool isImm0_3() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 4;
605  }
606  bool isImm0_7() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 8;
612  }
613  bool isImm0_15() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 16;
619  }
620  bool isImm0_31() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 32;
626  }
627  bool isImm0_63() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value >= 0 && Value < 64;
633  }
634  bool isImm8() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 8;
640  }
641  bool isImm16() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 16;
647  }
648  bool isImm32() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value == 32;
654  }
655  bool isShrImm8() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 8;
661  }
662  bool isShrImm16() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 16;
668  }
669  bool isShrImm32() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 32;
675  }
676  bool isShrImm64() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value <= 64;
682  }
683  bool isImm1_7() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 8;
689  }
690  bool isImm1_15() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 16;
696  }
697  bool isImm1_31() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 32;
703  }
704  bool isImm1_16() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 17;
710  }
711  bool isImm1_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value > 0 && Value < 33;
717  }
718  bool isImm0_32() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 33;
724  }
725  bool isImm0_65535() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    if (!CE) return false;
729    int64_t Value = CE->getValue();
730    return Value >= 0 && Value < 65536;
731  }
732  bool isImm0_65535Expr() const {
733    if (!isImm()) return false;
734    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
735    // If it's not a constant expression, it'll generate a fixup and be
736    // handled later.
737    if (!CE) return true;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value < 65536;
740  }
741  bool isImm24bit() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value >= 0 && Value <= 0xffffff;
747  }
748  bool isImmThumbSR() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value > 0 && Value < 33;
754  }
755  bool isPKHLSLImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value >= 0 && Value < 32;
761  }
762  bool isPKHASRImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return Value > 0 && Value <= 32;
768  }
769  bool isARMSOImm() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(Value) != -1;
775  }
776  bool isARMSOImmNot() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(~Value) != -1;
782  }
783  bool isARMSOImmNeg() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getSOImmVal(-Value) != -1;
789  }
790  bool isT2SOImm() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(Value) != -1;
796  }
797  bool isT2SOImmNot() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(~Value) != -1;
803  }
804  bool isT2SOImmNeg() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return ARM_AM::getT2SOImmVal(-Value) != -1;
810  }
811  bool isSetEndImm() const {
812    if (!isImm()) return false;
813    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814    if (!CE) return false;
815    int64_t Value = CE->getValue();
816    return Value == 1 || Value == 0;
817  }
818  bool isReg() const { return Kind == k_Register; }
819  bool isRegList() const { return Kind == k_RegisterList; }
820  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
821  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
822  bool isToken() const { return Kind == k_Token; }
823  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
824  bool isMemory() const { return Kind == k_Memory; }
825  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
826  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
827  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
828  bool isRotImm() const { return Kind == k_RotateImmediate; }
829  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
830  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
831  bool isPostIdxReg() const {
832    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
833  }
834  bool isMemNoOffset(bool alignOK = false) const {
835    if (!isMemory())
836      return false;
837    // No offset of any kind.
838    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
839     (alignOK || Memory.Alignment == 0);
840  }
841  bool isMemPCRelImm12() const {
842    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
843      return false;
844    // Base register must be PC.
845    if (Memory.BaseRegNum != ARM::PC)
846      return false;
847    // Immediate offset in range [-4095, 4095].
848    if (!Memory.OffsetImm) return true;
849    int64_t Val = Memory.OffsetImm->getValue();
850    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
851  }
852  bool isAlignedMemory() const {
853    return isMemNoOffset(true);
854  }
  // ARM addressing mode 2: [Rn, +/-Rm, shift] or [Rn, #+/-imm12].
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  // Post-indexed immediate offset legal for addressing mode 2.
  bool isAM2OffsetImm() const {
    if (!isImm()) return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  // ARM addressing mode 3: [Rn, +/-Rm] or [Rn, #+/-imm8], no shifts.
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  // Post-indexed offset (unshifted register or imm8) legal for AM3.
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  // ARM addressing mode 5 (VFP load/store): [Rn, #+/-imm8*4].
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the sentinel for '#-0'.
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  // [Rn, Rm] with no shift, negation or alignment — the TBB operand form.
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  // [Rn, Rm, lsl #1] exactly — the TBH operand form.
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  // Any register-offset memory operand without an alignment qualifier.
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
933  bool isT2MemRegOffset() const {
934    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
935        Memory.Alignment != 0)
936      return false;
937    // Only lsl #{0, 1, 2, 3} allowed.
938    if (Memory.ShiftType == ARM_AM::no_shift)
939      return true;
940    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
941      return false;
942    return true;
943  }
944  bool isMemThumbRR() const {
945    // Thumb reg+reg addressing is simple. Just two registers, a base and
946    // an offset. No shifts, negations or any other complicating factors.
947    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
948        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
949      return false;
950    return isARMLowRegister(Memory.BaseRegNum) &&
951      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
952  }
  // Thumb [Rn, #imm] with a low base register; imm is a multiple of 4 in
  // [0, 124] (word-sized accesses).
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  // Same, but imm is a multiple of 2 in [0, 62] (halfword accesses).
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62]. (The old comment
    // said 'multiple of 4', contradicting the '% 2' check below.)
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  // Same, but imm is any value in [0, 31] (byte accesses).
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  // Thumb [sp, #imm] with imm a multiple of 4 in [0, 1020].
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  // [Rn, #+/-imm] with imm a multiple of 4 in [-1020, 1020] (LDRD/STRD etc).
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  // [Rn, #imm] with imm a multiple of 4 in [0, 1020] (positive only).
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  // [Rn, #+/-imm8] with a non-PC base; imm in [-255, 255].
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the sentinel for '#-0'.
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  // [Rn, #imm8] with imm strictly non-negative, in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  // [Rn, #-imm8] with a non-PC base; imm in [-255, -1] (or '#-0').
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1]. Note: a missing offset is
    // rejected here (unlike the other predicates) since it can't be negative.
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  // [Rn, #imm12] with imm in [0, 4095].
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  // [Rn, #+/-imm12] with imm in [-4095, 4095], or a label reference.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the sentinel for '#-0'.
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  // Post-indexed constant offset in [-255, 255] (or the '#-0' sentinel).
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  // Post-indexed constant offset, multiple of 4 in [-1020, 1020].
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }
1075
  // Operand-kind predicates for MSR masks and CPS interrupt flags.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1078
1079  // NEON operands.
  // Vector register list with consecutive D registers ({d0, d1, ...}).
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  // Vector register list with every-other-D-register spacing ({d0, d2, ...}).
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  // The following check a single-spaced list for a specific element count.
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // Two double-spaced D registers, i.e. a Q-register pair view.
  bool isVecListTwoQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }
1110
  // All-lanes vector list variants ({d0[], d1[]} syntax), single spacing.
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }
  // All-lanes vector list with double spacing.
  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }
  // All-lanes list element-count checks, by spacing.
  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListTwoQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }
1131
  // Lane-indexed vector list variants ({d0[1], d1[1]} syntax), single spacing.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }
  // Lane-indexed vector list with double spacing.
  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
  // Each predicate below pins the element count and the maximum legal lane
  // index for one element size: 7 for bytes, 3 for halfwords, 1 for words.
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }
1177
  // Scalar lane index, bounded by element size: 8 lanes of bytes,
  // 4 of halfwords, 2 of words per D register.
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }
1190
  // Constant immediate usable as a NEON i8 splat (one byte, replicated).
  bool isNEONi8splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
    // value.
    return Value >= 0 && Value < 256;
  }

  // Constant immediate usable as a NEON i16 splat: a single non-zero byte
  // in either the low or high byte position.
  bool isNEONi16splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  // Constant immediate usable as a NEON i32 splat: a single non-zero byte
  // in any of the four byte positions.
  bool isNEONi32splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }
1224
  // Constant immediate encodable by VMOV.i32: one non-zero byte in any
  // position, or the 'X ones-extended' forms 0x00XXff / 0xXXffff.
  bool isNEONi32vmov() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
  // Same check applied to the bitwise complement, for the VMVN form.
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
1255
1256  bool isNEONi64splat() const {
1257    if (!isImm()) return false;
1258    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1259    // Must be a constant.
1260    if (!CE) return false;
1261    uint64_t Value = CE->getValue();
1262    // i64 value with each byte being either 0 or 0xff.
1263    for (unsigned i = 0; i < 8; ++i)
1264      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1265    return true;
1266  }
1267
  // Append an expression to Inst, folding constants to plain immediates.
  // A null Expr is treated as immediate 0.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }
1277
  // Emit the condition code plus its flag register: CPSR when predicated,
  // reg 0 for the always-executed (AL) case.
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  // Coprocessor number (p0-p15) as an immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  // Coprocessor register (c0-c15) as an immediate.
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  // Coprocessor option ({#imm} on LDC/STC) as an immediate.
  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  // IT-block condition mask.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  // IT-block condition code (no flag register, unlike addCondCodeOperands).
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  // Optional flag-setting output register (CPSR or reg 0).
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Plain register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1319
  // Register shifted by register: source reg, shift reg, and the packed
  // shift opcode/amount immediate.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by immediate: source reg plus packed shift immediate.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  // Shifter immediate: bit 5 selects ASR vs LSL, low bits hold the amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
1344
  // Expand a register list into one register operand per member.
  // Note: N counts the list as a single logical operand.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  // D- and S-register lists use the same expansion.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
1360
  // Rotate immediate for SXTB/UXTB-style extends.
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  // BFC/BFI bitfield descriptor, encoded as an inverted field mask.
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  // Generic immediate: constants fold to Imm operands, others stay Expr.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
1382
  // Fixed-point fraction bits for VCVT, encoded as (size - fbits).
  // NOTE(review): CE is dereferenced unchecked — presumably the operand
  // predicate guarantees a constant here; confirm against the matcher.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
  }

  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
  }

  // VFP floating-point immediate, already in encoded form.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }
1399
  // imm8s4: multiple-of-4 immediate, currently stored unscaled.
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // imm0_1020s4: stored scaled (divided by 4), matching the encoding.
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  // imm0_508s4: likewise stored scaled by 4.
  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }
1423
  // Immediates in [1,16]/[1,32] encode as value-1; store the encoded form.
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  // Thumb shift-right amount: a shift of 32 encodes as 0.
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }
1448
  // PKH ASR shift amount: ASR #32 encodes as 0.
  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  // The four helpers below handle aliases where the assembly carries the
  // complement/negation of the encodable immediate (e.g. 'and' written as
  // 'bic'); they store the encodable (twiddled) value.
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }
1489
  // DMB/DSB/ISB barrier option as an immediate.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  // [Rn] with no offset: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  // PC-relative imm12: only the offset is emitted (base pc is implicit).
  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    int32_t Imm = Memory.OffsetImm->getValue();
    // FIXME: Handle #-0
    if (Imm == INT32_MIN) Imm = 0;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Base register plus alignment (for NEON aligned accesses).
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }
1513
  // Addressing mode 2: base reg, offset reg (0 if immediate form), and the
  // packed AM2 opcode word (add/sub, offset or shift, shift type).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1533
  // Post-indexed AM2 immediate offset: reg 0 placeholder plus packed
  // add/sub + magnitude value.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1547
  // Addressing mode 3: base reg, offset reg (0 for immediate form), and
  // the packed AM3 opcode word. A bare immediate is a label reference.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateReg(0));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1576
  // Post-indexed AM3 offset: either a register (with add/sub flag) or a
  // constant folded into the packed AM3 opcode with a reg-0 placeholder.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1598
  // Addressing mode 5 (VFP): base reg plus packed add/sub + word-scaled
  // offset. A bare immediate is a label reference.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1620
  // Add base register + raw (unscaled here) immediate operands for an
  // imm8s4-style memory offset.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1636
  // Add base register + word-scaled immediate for a [0,1020] multiple-of-4
  // memory offset.
  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1644
  // Add base register + unscaled immediate operands for an imm8 memory offset.
  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1651
  // Positive imm8 offsets encode identically to the generic imm8 form; the
  // sign restriction is enforced by the operand predicate, not here.
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
1655
  // Negative imm8 offsets also encode via the generic imm8 form; the sign
  // restriction is enforced by the operand predicate, not here.
  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
1659
  // Add operands for an unsigned imm12 memory offset, or a label reference
  // (expression) needing a fixup.
  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1674
  // Add operands for a (signed) imm12 memory offset, or a label reference
  // (expression) needing a fixup.
  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1689
  // Add base and index register operands for a TBB memory reference.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1695
  // Add base and index register operands for a TBH memory reference. The
  // lsl #1 on the index is implicit in the instruction, so no shift operand.
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1701
  // Add base register, offset register, and a combined AM2 immediate that
  // packs the add/sub direction, shift amount, and shift type.
  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1711
  // Thumb2 reg+reg memory operand: base, offset register, and the raw shift
  // amount (Thumb2 only allows a left shift, so no shift type is encoded).
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }
1718
  // Thumb reg+reg memory operand: just base and offset registers.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1724
  // Thumb reg+imm memory operand with the immediate scaled by 4 (word).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1731
  // Thumb reg+imm memory operand with the immediate scaled by 2 (halfword).
  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1738
  // Thumb reg+imm memory operand with an unscaled (byte) immediate.
  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1745
  // Thumb SP-relative memory operand; immediate is scaled by 4 (word).
  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1752
  // Encode a post-indexed imm8 offset: magnitude in the low bits with the
  // add/sub direction folded into bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // Special case for #-0 (represented by the parser as INT32_MIN).
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1763
  // Encode a post-indexed imm8s4 offset: magnitude divided by 4 in the low
  // bits with the add/sub direction folded into bit 8.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // Special case for #-0 (represented by the parser as INT32_MIN).
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1775
  // Post-indexed register offset: the register plus a flag for add vs. sub.
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }
1781
  // Post-indexed shifted register offset: register plus a single AM2-encoded
  // immediate carrying sign, shift type, and shift amount.
  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1792
  // Add the MSR mask value as a plain immediate operand.
  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }
1797
  // Add the processor interrupt flags (CPS iflags) as an immediate operand.
  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }
1802
  // Add a vector list as a single (super-)register operand.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }
1807
  // Add a lane-indexed vector list: the list register plus the lane index.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
  }
1813
  // Add an 8-bit-element vector lane index as an immediate operand.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1818
  // Add a 16-bit-element vector lane index as an immediate operand.
  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1823
  // Add a 32-bit-element vector lane index as an immediate operand.
  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1828
1829  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1830    assert(N == 1 && "Invalid number of operands!");
1831    // The immediate encodes the type of constant as well as the value.
1832    // Mask in that this is an i8 splat.
1833    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1834    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1835  }
1836
1837  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1838    assert(N == 1 && "Invalid number of operands!");
1839    // The immediate encodes the type of constant as well as the value.
1840    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1841    unsigned Value = CE->getValue();
1842    if (Value >= 256)
1843      Value = (Value >> 8) | 0xa00;
1844    else
1845      Value |= 0x800;
1846    Inst.addOperand(MCOperand::CreateImm(Value));
1847  }
1848
1849  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1850    assert(N == 1 && "Invalid number of operands!");
1851    // The immediate encodes the type of constant as well as the value.
1852    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1853    unsigned Value = CE->getValue();
1854    if (Value >= 256 && Value <= 0xff00)
1855      Value = (Value >> 8) | 0x200;
1856    else if (Value > 0xffff && Value <= 0xff0000)
1857      Value = (Value >> 16) | 0x400;
1858    else if (Value > 0xffffff)
1859      Value = (Value >> 24) | 0x600;
1860    Inst.addOperand(MCOperand::CreateImm(Value));
1861  }
1862
1863  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1864    assert(N == 1 && "Invalid number of operands!");
1865    // The immediate encodes the type of constant as well as the value.
1866    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1867    unsigned Value = CE->getValue();
1868    if (Value >= 256 && Value <= 0xffff)
1869      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1870    else if (Value > 0xffff && Value <= 0xffffff)
1871      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1872    else if (Value > 0xffffff)
1873      Value = (Value >> 24) | 0x600;
1874    Inst.addOperand(MCOperand::CreateImm(Value));
1875  }
1876
1877  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1878    assert(N == 1 && "Invalid number of operands!");
1879    // The immediate encodes the type of constant as well as the value.
1880    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1881    unsigned Value = ~CE->getValue();
1882    if (Value >= 256 && Value <= 0xffff)
1883      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1884    else if (Value > 0xffff && Value <= 0xffffff)
1885      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1886    else if (Value > 0xffffff)
1887      Value = (Value >> 24) | 0x600;
1888    Inst.addOperand(MCOperand::CreateImm(Value));
1889  }
1890
1891  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1892    assert(N == 1 && "Invalid number of operands!");
1893    // The immediate encodes the type of constant as well as the value.
1894    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1895    uint64_t Value = CE->getValue();
1896    unsigned Imm = 0;
1897    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1898      Imm |= (Value & 1) << i;
1899    }
1900    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1901  }
1902
1903  virtual void print(raw_ostream &OS) const;
1904
1905  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1906    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1907    Op->ITMask.Mask = Mask;
1908    Op->StartLoc = S;
1909    Op->EndLoc = S;
1910    return Op;
1911  }
1912
1913  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1914    ARMOperand *Op = new ARMOperand(k_CondCode);
1915    Op->CC.Val = CC;
1916    Op->StartLoc = S;
1917    Op->EndLoc = S;
1918    return Op;
1919  }
1920
1921  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1922    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1923    Op->Cop.Val = CopVal;
1924    Op->StartLoc = S;
1925    Op->EndLoc = S;
1926    return Op;
1927  }
1928
1929  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1930    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1931    Op->Cop.Val = CopVal;
1932    Op->StartLoc = S;
1933    Op->EndLoc = S;
1934    return Op;
1935  }
1936
1937  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1938    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1939    Op->Cop.Val = Val;
1940    Op->StartLoc = S;
1941    Op->EndLoc = E;
1942    return Op;
1943  }
1944
1945  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1946    ARMOperand *Op = new ARMOperand(k_CCOut);
1947    Op->Reg.RegNum = RegNum;
1948    Op->StartLoc = S;
1949    Op->EndLoc = S;
1950    return Op;
1951  }
1952
1953  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1954    ARMOperand *Op = new ARMOperand(k_Token);
1955    Op->Tok.Data = Str.data();
1956    Op->Tok.Length = Str.size();
1957    Op->StartLoc = S;
1958    Op->EndLoc = S;
1959    return Op;
1960  }
1961
1962  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1963    ARMOperand *Op = new ARMOperand(k_Register);
1964    Op->Reg.RegNum = RegNum;
1965    Op->StartLoc = S;
1966    Op->EndLoc = E;
1967    return Op;
1968  }
1969
1970  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1971                                           unsigned SrcReg,
1972                                           unsigned ShiftReg,
1973                                           unsigned ShiftImm,
1974                                           SMLoc S, SMLoc E) {
1975    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1976    Op->RegShiftedReg.ShiftTy = ShTy;
1977    Op->RegShiftedReg.SrcReg = SrcReg;
1978    Op->RegShiftedReg.ShiftReg = ShiftReg;
1979    Op->RegShiftedReg.ShiftImm = ShiftImm;
1980    Op->StartLoc = S;
1981    Op->EndLoc = E;
1982    return Op;
1983  }
1984
1985  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1986                                            unsigned SrcReg,
1987                                            unsigned ShiftImm,
1988                                            SMLoc S, SMLoc E) {
1989    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1990    Op->RegShiftedImm.ShiftTy = ShTy;
1991    Op->RegShiftedImm.SrcReg = SrcReg;
1992    Op->RegShiftedImm.ShiftImm = ShiftImm;
1993    Op->StartLoc = S;
1994    Op->EndLoc = E;
1995    return Op;
1996  }
1997
1998  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1999                                   SMLoc S, SMLoc E) {
2000    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2001    Op->ShifterImm.isASR = isASR;
2002    Op->ShifterImm.Imm = Imm;
2003    Op->StartLoc = S;
2004    Op->EndLoc = E;
2005    return Op;
2006  }
2007
2008  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2009    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2010    Op->RotImm.Imm = Imm;
2011    Op->StartLoc = S;
2012    Op->EndLoc = E;
2013    return Op;
2014  }
2015
2016  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2017                                    SMLoc S, SMLoc E) {
2018    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2019    Op->Bitfield.LSB = LSB;
2020    Op->Bitfield.Width = Width;
2021    Op->StartLoc = S;
2022    Op->EndLoc = E;
2023    return Op;
2024  }
2025
  // Factory for a register-list operand. The list kind (core/DPR/SPR) is
  // derived from the register class of the first element.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    // Keep the stored list sorted regardless of the order written in source.
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }
2046
2047  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2048                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2049    ARMOperand *Op = new ARMOperand(k_VectorList);
2050    Op->VectorList.RegNum = RegNum;
2051    Op->VectorList.Count = Count;
2052    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2053    Op->StartLoc = S;
2054    Op->EndLoc = E;
2055    return Op;
2056  }
2057
2058  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2059                                              bool isDoubleSpaced,
2060                                              SMLoc S, SMLoc E) {
2061    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2062    Op->VectorList.RegNum = RegNum;
2063    Op->VectorList.Count = Count;
2064    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2065    Op->StartLoc = S;
2066    Op->EndLoc = E;
2067    return Op;
2068  }
2069
2070  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2071                                             unsigned Index,
2072                                             bool isDoubleSpaced,
2073                                             SMLoc S, SMLoc E) {
2074    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2075    Op->VectorList.RegNum = RegNum;
2076    Op->VectorList.Count = Count;
2077    Op->VectorList.LaneIndex = Index;
2078    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2079    Op->StartLoc = S;
2080    Op->EndLoc = E;
2081    return Op;
2082  }
2083
2084  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2085                                       MCContext &Ctx) {
2086    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2087    Op->VectorIndex.Val = Idx;
2088    Op->StartLoc = S;
2089    Op->EndLoc = E;
2090    return Op;
2091  }
2092
2093  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2094    ARMOperand *Op = new ARMOperand(k_Immediate);
2095    Op->Imm.Val = Val;
2096    Op->StartLoc = S;
2097    Op->EndLoc = E;
2098    return Op;
2099  }
2100
2101  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2102    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2103    Op->FPImm.Val = Val;
2104    Op->StartLoc = S;
2105    Op->EndLoc = S;
2106    return Op;
2107  }
2108
2109  static ARMOperand *CreateMem(unsigned BaseRegNum,
2110                               const MCConstantExpr *OffsetImm,
2111                               unsigned OffsetRegNum,
2112                               ARM_AM::ShiftOpc ShiftType,
2113                               unsigned ShiftImm,
2114                               unsigned Alignment,
2115                               bool isNegative,
2116                               SMLoc S, SMLoc E) {
2117    ARMOperand *Op = new ARMOperand(k_Memory);
2118    Op->Memory.BaseRegNum = BaseRegNum;
2119    Op->Memory.OffsetImm = OffsetImm;
2120    Op->Memory.OffsetRegNum = OffsetRegNum;
2121    Op->Memory.ShiftType = ShiftType;
2122    Op->Memory.ShiftImm = ShiftImm;
2123    Op->Memory.Alignment = Alignment;
2124    Op->Memory.isNegative = isNegative;
2125    Op->StartLoc = S;
2126    Op->EndLoc = E;
2127    return Op;
2128  }
2129
2130  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2131                                      ARM_AM::ShiftOpc ShiftTy,
2132                                      unsigned ShiftImm,
2133                                      SMLoc S, SMLoc E) {
2134    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2135    Op->PostIdxReg.RegNum = RegNum;
2136    Op->PostIdxReg.isAdd = isAdd;
2137    Op->PostIdxReg.ShiftTy = ShiftTy;
2138    Op->PostIdxReg.ShiftImm = ShiftImm;
2139    Op->StartLoc = S;
2140    Op->EndLoc = E;
2141    return Op;
2142  }
2143
2144  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2145    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2146    Op->MBOpt.Val = Opt;
2147    Op->StartLoc = S;
2148    Op->EndLoc = S;
2149    return Op;
2150  }
2151
2152  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2153    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2154    Op->IFlags.Val = IFlags;
2155    Op->StartLoc = S;
2156    Op->EndLoc = S;
2157    return Op;
2158  }
2159
2160  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2161    ARMOperand *Op = new ARMOperand(k_MSRMask);
2162    Op->MMask.Val = MMask;
2163    Op->StartLoc = S;
2164    Op->EndLoc = S;
2165    return Op;
2166  }
2167};
2168
2169} // end anonymous namespace.
2170
2171void ARMOperand::print(raw_ostream &OS) const {
2172  switch (Kind) {
2173  case k_FPImmediate:
2174    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2175       << ") >";
2176    break;
2177  case k_CondCode:
2178    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2179    break;
2180  case k_CCOut:
2181    OS << "<ccout " << getReg() << ">";
2182    break;
2183  case k_ITCondMask: {
2184    static const char *MaskStr[] = {
2185      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2186      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2187    };
2188    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2189    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2190    break;
2191  }
2192  case k_CoprocNum:
2193    OS << "<coprocessor number: " << getCoproc() << ">";
2194    break;
2195  case k_CoprocReg:
2196    OS << "<coprocessor register: " << getCoproc() << ">";
2197    break;
2198  case k_CoprocOption:
2199    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2200    break;
2201  case k_MSRMask:
2202    OS << "<mask: " << getMSRMask() << ">";
2203    break;
2204  case k_Immediate:
2205    getImm()->print(OS);
2206    break;
2207  case k_MemBarrierOpt:
2208    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2209    break;
2210  case k_Memory:
2211    OS << "<memory "
2212       << " base:" << Memory.BaseRegNum;
2213    OS << ">";
2214    break;
2215  case k_PostIndexRegister:
2216    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2217       << PostIdxReg.RegNum;
2218    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2219      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2220         << PostIdxReg.ShiftImm;
2221    OS << ">";
2222    break;
2223  case k_ProcIFlags: {
2224    OS << "<ARM_PROC::";
2225    unsigned IFlags = getProcIFlags();
2226    for (int i=2; i >= 0; --i)
2227      if (IFlags & (1 << i))
2228        OS << ARM_PROC::IFlagsToString(1 << i);
2229    OS << ">";
2230    break;
2231  }
2232  case k_Register:
2233    OS << "<register " << getReg() << ">";
2234    break;
2235  case k_ShifterImmediate:
2236    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2237       << " #" << ShifterImm.Imm << ">";
2238    break;
2239  case k_ShiftedRegister:
2240    OS << "<so_reg_reg "
2241       << RegShiftedReg.SrcReg << " "
2242       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2243       << " " << RegShiftedReg.ShiftReg << ">";
2244    break;
2245  case k_ShiftedImmediate:
2246    OS << "<so_reg_imm "
2247       << RegShiftedImm.SrcReg << " "
2248       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2249       << " #" << RegShiftedImm.ShiftImm << ">";
2250    break;
2251  case k_RotateImmediate:
2252    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2253    break;
2254  case k_BitfieldDescriptor:
2255    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2256       << ", width: " << Bitfield.Width << ">";
2257    break;
2258  case k_RegisterList:
2259  case k_DPRRegisterList:
2260  case k_SPRRegisterList: {
2261    OS << "<register_list ";
2262
2263    const SmallVectorImpl<unsigned> &RegList = getRegList();
2264    for (SmallVectorImpl<unsigned>::const_iterator
2265           I = RegList.begin(), E = RegList.end(); I != E; ) {
2266      OS << *I;
2267      if (++I < E) OS << ", ";
2268    }
2269
2270    OS << ">";
2271    break;
2272  }
2273  case k_VectorList:
2274    OS << "<vector_list " << VectorList.Count << " * "
2275       << VectorList.RegNum << ">";
2276    break;
2277  case k_VectorListAllLanes:
2278    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2279       << VectorList.RegNum << ">";
2280    break;
2281  case k_VectorListIndexed:
2282    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2283       << VectorList.Count << " * " << VectorList.RegNum << ">";
2284    break;
2285  case k_Token:
2286    OS << "'" << getToken() << "'";
2287    break;
2288  case k_VectorIndex:
2289    OS << "<vectorindex " << getVectorIndex() << ">";
2290    break;
2291  }
2292}
2293
2294/// @name Auto-generated Match Functions
2295/// {
2296
2297static unsigned MatchRegisterName(StringRef Name);
2298
2299/// }
2300
2301bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2302                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2303  StartLoc = Parser.getTok().getLoc();
2304  RegNo = tryParseRegister();
2305  EndLoc = Parser.getTok().getLoc();
2306
2307  return (RegNo == (unsigned)-1);
2308}
2309
2310/// Try to parse a register name.  The token must be an Identifier when called,
2311/// and if it is a register name the token is eaten and the register number is
2312/// returned.  Otherwise return -1.
2313///
/// Try to parse a register name.  The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned.  Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are matched case-insensitively.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}
2360
2361// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
2362// If a recoverable error occurs, return 1. If an irrecoverable error
2363// occurs, return -1. An irrecoverable error is one where tokens have been
2364// consumed in the process of trying to parse the shifter (i.e., when it is
2365// indeed a shifter operand, but malformed).
// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Match the shift mnemonic case-insensitively ("asl" is a gas alias of lsl).
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic at all: recoverable, nothing consumed yet.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
                    "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Build either a shifted-register or shifted-immediate operand; RRX always
  // takes the immediate form (its ShiftReg is only for the encoder).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}
2456
2457
2458/// Try to parse a register name.  The token must be an Identifier when called.
2459/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a writeback. Returns 'true' if the token is not a register.
2461///
2462/// TODO this is likely to change to allow different register types and or to
2463/// parse for a specific register type.
2464bool ARMAsmParser::
2465tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2466  SMLoc S = Parser.getTok().getLoc();
2467  int RegNo = tryParseRegister();
2468  if (RegNo == -1)
2469    return true;
2470
2471  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2472
2473  const AsmToken &ExclaimTok = Parser.getTok();
2474  if (ExclaimTok.is(AsmToken::Exclaim)) {
2475    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2476                                               ExclaimTok.getLoc()));
2477    Parser.Lex(); // Eat exclaim token
2478    return false;
2479  }
2480
2481  // Also check for an index operand. This is only legal for vector registers,
2482  // but that'll get caught OK in operand matching, so we don't need to
2483  // explicitly filter everything else out here.
2484  if (Parser.getTok().is(AsmToken::LBrac)) {
2485    SMLoc SIdx = Parser.getTok().getLoc();
2486    Parser.Lex(); // Eat left bracket token.
2487
2488    const MCExpr *ImmVal;
2489    if (getParser().ParseExpression(ImmVal))
2490      return MatchOperand_ParseFail;
2491    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2492    if (!MCE) {
2493      TokError("immediate value expected for vector index");
2494      return MatchOperand_ParseFail;
2495    }
2496
2497    SMLoc E = Parser.getTok().getLoc();
2498    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2499      Error(E, "']' expected");
2500      return MatchOperand_ParseFail;
2501    }
2502
2503    Parser.Lex(); // Eat right bracket token.
2504
2505    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2506                                                     SIdx, E,
2507                                                     getContext()));
2508  }
2509
2510  return false;
2511}
2512
/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
/// instruction with a symbolic operand name. Examples: "p1", "p7", "c3",
2515/// "c5", ...
2516static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2517  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2518  // but efficient.
2519  switch (Name.size()) {
2520  default: break;
2521  case 2:
2522    if (Name[0] != CoprocOp)
2523      return -1;
2524    switch (Name[1]) {
2525    default:  return -1;
2526    case '0': return 0;
2527    case '1': return 1;
2528    case '2': return 2;
2529    case '3': return 3;
2530    case '4': return 4;
2531    case '5': return 5;
2532    case '6': return 6;
2533    case '7': return 7;
2534    case '8': return 8;
2535    case '9': return 9;
2536    }
2537    break;
2538  case 3:
2539    if (Name[0] != CoprocOp || Name[1] != '1')
2540      return -1;
2541    switch (Name[2]) {
2542    default:  return -1;
2543    case '0': return 10;
2544    case '1': return 11;
2545    case '2': return 12;
2546    case '3': return 13;
2547    case '4': return 14;
2548    case '5': return 15;
2549    }
2550    break;
2551  }
2552
2553  return -1;
2554}
2555
2556/// parseITCondCode - Try to parse a condition code for an IT instruction.
2557ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2558parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2559  SMLoc S = Parser.getTok().getLoc();
2560  const AsmToken &Tok = Parser.getTok();
2561  if (!Tok.is(AsmToken::Identifier))
2562    return MatchOperand_NoMatch;
2563  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2564    .Case("eq", ARMCC::EQ)
2565    .Case("ne", ARMCC::NE)
2566    .Case("hs", ARMCC::HS)
2567    .Case("cs", ARMCC::HS)
2568    .Case("lo", ARMCC::LO)
2569    .Case("cc", ARMCC::LO)
2570    .Case("mi", ARMCC::MI)
2571    .Case("pl", ARMCC::PL)
2572    .Case("vs", ARMCC::VS)
2573    .Case("vc", ARMCC::VC)
2574    .Case("hi", ARMCC::HI)
2575    .Case("ls", ARMCC::LS)
2576    .Case("ge", ARMCC::GE)
2577    .Case("lt", ARMCC::LT)
2578    .Case("gt", ARMCC::GT)
2579    .Case("le", ARMCC::LE)
2580    .Case("al", ARMCC::AL)
2581    .Default(~0U);
2582  if (CC == ~0U)
2583    return MatchOperand_NoMatch;
2584  Parser.Lex(); // Eat the token.
2585
2586  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2587
2588  return MatchOperand_Success;
2589}
2590
/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2592/// token must be an Identifier when called, and if it is a coprocessor
2593/// number, the token is eaten and the operand is added to the operand list.
2594ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2595parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2596  SMLoc S = Parser.getTok().getLoc();
2597  const AsmToken &Tok = Parser.getTok();
2598  if (Tok.isNot(AsmToken::Identifier))
2599    return MatchOperand_NoMatch;
2600
2601  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2602  if (Num == -1)
2603    return MatchOperand_NoMatch;
2604
2605  Parser.Lex(); // Eat identifier token.
2606  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2607  return MatchOperand_Success;
2608}
2609
/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2611/// token must be an Identifier when called, and if it is a coprocessor
2612/// number, the token is eaten and the operand is added to the operand list.
2613ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2614parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2615  SMLoc S = Parser.getTok().getLoc();
2616  const AsmToken &Tok = Parser.getTok();
2617  if (Tok.isNot(AsmToken::Identifier))
2618    return MatchOperand_NoMatch;
2619
2620  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2621  if (Reg == -1)
2622    return MatchOperand_NoMatch;
2623
2624  Parser.Lex(); // Eat identifier token.
2625  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2626  return MatchOperand_Success;
2627}
2628
/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2630/// coproc_option : '{' imm0_255 '}'
2631ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2632parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2633  SMLoc S = Parser.getTok().getLoc();
2634
2635  // If this isn't a '{', this isn't a coprocessor immediate operand.
2636  if (Parser.getTok().isNot(AsmToken::LCurly))
2637    return MatchOperand_NoMatch;
2638  Parser.Lex(); // Eat the '{'
2639
2640  const MCExpr *Expr;
2641  SMLoc Loc = Parser.getTok().getLoc();
2642  if (getParser().ParseExpression(Expr)) {
2643    Error(Loc, "illegal expression");
2644    return MatchOperand_ParseFail;
2645  }
2646  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2647  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2648    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2649    return MatchOperand_ParseFail;
2650  }
2651  int Val = CE->getValue();
2652
2653  // Check for and consume the closing '}'
2654  if (Parser.getTok().isNot(AsmToken::RCurly))
2655    return MatchOperand_ParseFail;
2656  SMLoc E = Parser.getTok().getLoc();
2657  Parser.Lex(); // Eat the '}'
2658
2659  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2660  return MatchOperand_Success;
2661}
2662
2663// For register list parsing, we need to map from raw GPR register numbering
2664// to the enumeration values. The enumeration values aren't sorted by
2665// register number due to our using "sp", "lr" and "pc" as canonical names.
2666static unsigned getNextRegister(unsigned Reg) {
2667  // If this is a GPR, we need to do it manually, otherwise we can rely
2668  // on the sort ordering of the enumeration since the other reg-classes
2669  // are sane.
2670  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2671    return Reg + 1;
2672  switch(Reg) {
2673  default: assert(0 && "Invalid GPR number!");
2674  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2675  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2676  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2677  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2678  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2679  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2680  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2681  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2682  }
2683}
2684
2685// Return the low-subreg of a given Q register.
2686static unsigned getDRegFromQReg(unsigned QReg) {
2687  switch (QReg) {
2688  default: llvm_unreachable("expected a Q register!");
2689  case ARM::Q0:  return ARM::D0;
2690  case ARM::Q1:  return ARM::D2;
2691  case ARM::Q2:  return ARM::D4;
2692  case ARM::Q3:  return ARM::D6;
2693  case ARM::Q4:  return ARM::D8;
2694  case ARM::Q5:  return ARM::D10;
2695  case ARM::Q6:  return ARM::D12;
2696  case ARM::Q7:  return ARM::D14;
2697  case ARM::Q8:  return ARM::D16;
2698  case ARM::Q9:  return ARM::D18;
2699  case ARM::Q10: return ARM::D20;
2700  case ARM::Q11: return ARM::D22;
2701  case ARM::Q12: return ARM::D24;
2702  case ARM::Q13: return ARM::D26;
2703  case ARM::Q14: return ARM::D28;
2704  case ARM::Q15: return ARM::D30;
2705  }
2706}
2707
2708/// Parse a register list.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Parses a {reg, reg-reg, ...} style register list (GPR, DPR, or SPR)
  // and pushes a single reglist operand. Returns true on error (with a
  // diagnostic already emitted), false on success.
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // The class of the first register fixes the class for the whole list.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // A minus introduces a register range, e.g. {r0-r3}.
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // A duplicate register is only a warning (for gas compatibility);
    // the duplicate entry is dropped.
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
2830// Helper function to parse the lane index for vector lists.
2831ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2832parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2833  Index = 0; // Always return a defined index value.
2834  if (Parser.getTok().is(AsmToken::LBrac)) {
2835    Parser.Lex(); // Eat the '['.
2836    if (Parser.getTok().is(AsmToken::RBrac)) {
2837      // "Dn[]" is the 'all lanes' syntax.
2838      LaneKind = AllLanes;
2839      Parser.Lex(); // Eat the ']'.
2840      return MatchOperand_Success;
2841    }
2842    const MCExpr *LaneIndex;
2843    SMLoc Loc = Parser.getTok().getLoc();
2844    if (getParser().ParseExpression(LaneIndex)) {
2845      Error(Loc, "illegal expression");
2846      return MatchOperand_ParseFail;
2847    }
2848    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2849    if (!CE) {
2850      Error(Loc, "lane index must be empty or an integer");
2851      return MatchOperand_ParseFail;
2852    }
2853    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2854      Error(Parser.getTok().getLoc(), "']' expected");
2855      return MatchOperand_ParseFail;
2856    }
2857    Parser.Lex(); // Eat the ']'.
2858    int64_t Val = CE->getValue();
2859
2860    // FIXME: Make this range check context sensitive for .8, .16, .32.
2861    if (Val < 0 || Val > 7) {
2862      Error(Parser.getTok().getLoc(), "lane index out of range");
2863      return MatchOperand_ParseFail;
2864    }
2865    Index = Val;
2866    LaneKind = IndexedLane;
2867    return MatchOperand_Success;
2868  }
2869  LaneKind = NoLanes;
2870  return MatchOperand_Success;
2871}
2872
2873// parse a vector register list
2874ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2875parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2876  VectorLaneTy LaneKind;
2877  unsigned LaneIndex;
2878  SMLoc S = Parser.getTok().getLoc();
2879  // As an extension (to match gas), support a plain D register or Q register
2880  // (without encosing curly braces) as a single or double entry list,
2881  // respectively.
2882  if (Parser.getTok().is(AsmToken::Identifier)) {
2883    int Reg = tryParseRegister();
2884    if (Reg == -1)
2885      return MatchOperand_NoMatch;
2886    SMLoc E = Parser.getTok().getLoc();
2887    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2888      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2889      if (Res != MatchOperand_Success)
2890        return Res;
2891      switch (LaneKind) {
2892      case NoLanes:
2893        E = Parser.getTok().getLoc();
2894        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2895        break;
2896      case AllLanes:
2897        E = Parser.getTok().getLoc();
2898        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2899                                                                S, E));
2900        break;
2901      case IndexedLane:
2902        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2903                                                               LaneIndex,
2904                                                               false, S, E));
2905        break;
2906      }
2907      return MatchOperand_Success;
2908    }
2909    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2910      Reg = getDRegFromQReg(Reg);
2911      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2912      if (Res != MatchOperand_Success)
2913        return Res;
2914      switch (LaneKind) {
2915      case NoLanes:
2916        E = Parser.getTok().getLoc();
2917        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2918        break;
2919      case AllLanes:
2920        E = Parser.getTok().getLoc();
2921        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2922                                                                S, E));
2923        break;
2924      case IndexedLane:
2925        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2926                                                               LaneIndex,
2927                                                               false, S, E));
2928        break;
2929      }
2930      return MatchOperand_Success;
2931    }
2932    Error(S, "vector register expected");
2933    return MatchOperand_ParseFail;
2934  }
2935
2936  if (Parser.getTok().isNot(AsmToken::LCurly))
2937    return MatchOperand_NoMatch;
2938
2939  Parser.Lex(); // Eat '{' token.
2940  SMLoc RegLoc = Parser.getTok().getLoc();
2941
2942  int Reg = tryParseRegister();
2943  if (Reg == -1) {
2944    Error(RegLoc, "register expected");
2945    return MatchOperand_ParseFail;
2946  }
2947  unsigned Count = 1;
2948  int Spacing = 0;
2949  unsigned FirstReg = Reg;
2950  // The list is of D registers, but we also allow Q regs and just interpret
2951  // them as the two D sub-registers.
2952  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2953    FirstReg = Reg = getDRegFromQReg(Reg);
2954    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2955                 // it's ambiguous with four-register single spaced.
2956    ++Reg;
2957    ++Count;
2958  }
2959  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2960    return MatchOperand_ParseFail;
2961
2962  while (Parser.getTok().is(AsmToken::Comma) ||
2963         Parser.getTok().is(AsmToken::Minus)) {
2964    if (Parser.getTok().is(AsmToken::Minus)) {
2965      if (!Spacing)
2966        Spacing = 1; // Register range implies a single spaced list.
2967      else if (Spacing == 2) {
2968        Error(Parser.getTok().getLoc(),
2969              "sequential registers in double spaced list");
2970        return MatchOperand_ParseFail;
2971      }
2972      Parser.Lex(); // Eat the minus.
2973      SMLoc EndLoc = Parser.getTok().getLoc();
2974      int EndReg = tryParseRegister();
2975      if (EndReg == -1) {
2976        Error(EndLoc, "register expected");
2977        return MatchOperand_ParseFail;
2978      }
2979      // Allow Q regs and just interpret them as the two D sub-registers.
2980      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2981        EndReg = getDRegFromQReg(EndReg) + 1;
2982      // If the register is the same as the start reg, there's nothing
2983      // more to do.
2984      if (Reg == EndReg)
2985        continue;
2986      // The register must be in the same register class as the first.
2987      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2988        Error(EndLoc, "invalid register in register list");
2989        return MatchOperand_ParseFail;
2990      }
2991      // Ranges must go from low to high.
2992      if (Reg > EndReg) {
2993        Error(EndLoc, "bad range in register list");
2994        return MatchOperand_ParseFail;
2995      }
2996      // Parse the lane specifier if present.
2997      VectorLaneTy NextLaneKind;
2998      unsigned NextLaneIndex;
2999      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3000        return MatchOperand_ParseFail;
3001      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3002        Error(EndLoc, "mismatched lane index in register list");
3003        return MatchOperand_ParseFail;
3004      }
3005      EndLoc = Parser.getTok().getLoc();
3006
3007      // Add all the registers in the range to the register list.
3008      Count += EndReg - Reg;
3009      Reg = EndReg;
3010      continue;
3011    }
3012    Parser.Lex(); // Eat the comma.
3013    RegLoc = Parser.getTok().getLoc();
3014    int OldReg = Reg;
3015    Reg = tryParseRegister();
3016    if (Reg == -1) {
3017      Error(RegLoc, "register expected");
3018      return MatchOperand_ParseFail;
3019    }
3020    // vector register lists must be contiguous.
3021    // It's OK to use the enumeration values directly here rather, as the
3022    // VFP register classes have the enum sorted properly.
3023    //
3024    // The list is of D registers, but we also allow Q regs and just interpret
3025    // them as the two D sub-registers.
3026    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3027      if (!Spacing)
3028        Spacing = 1; // Register range implies a single spaced list.
3029      else if (Spacing == 2) {
3030        Error(RegLoc,
3031              "invalid register in double-spaced list (must be 'D' register')");
3032        return MatchOperand_ParseFail;
3033      }
3034      Reg = getDRegFromQReg(Reg);
3035      if (Reg != OldReg + 1) {
3036        Error(RegLoc, "non-contiguous register range");
3037        return MatchOperand_ParseFail;
3038      }
3039      ++Reg;
3040      Count += 2;
3041      // Parse the lane specifier if present.
3042      VectorLaneTy NextLaneKind;
3043      unsigned NextLaneIndex;
3044      SMLoc EndLoc = Parser.getTok().getLoc();
3045      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3046        return MatchOperand_ParseFail;
3047      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3048        Error(EndLoc, "mismatched lane index in register list");
3049        return MatchOperand_ParseFail;
3050      }
3051      continue;
3052    }
3053    // Normal D register.
3054    // Figure out the register spacing (single or double) of the list if
3055    // we don't know it already.
3056    if (!Spacing)
3057      Spacing = 1 + (Reg == OldReg + 2);
3058
3059    // Just check that it's contiguous and keep going.
3060    if (Reg != OldReg + Spacing) {
3061      Error(RegLoc, "non-contiguous register range");
3062      return MatchOperand_ParseFail;
3063    }
3064    ++Count;
3065    // Parse the lane specifier if present.
3066    VectorLaneTy NextLaneKind;
3067    unsigned NextLaneIndex;
3068    SMLoc EndLoc = Parser.getTok().getLoc();
3069    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3070      return MatchOperand_ParseFail;
3071    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3072      Error(EndLoc, "mismatched lane index in register list");
3073      return MatchOperand_ParseFail;
3074    }
3075  }
3076
3077  SMLoc E = Parser.getTok().getLoc();
3078  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3079    Error(E, "'}' expected");
3080    return MatchOperand_ParseFail;
3081  }
3082  Parser.Lex(); // Eat '}' token.
3083
3084  switch (LaneKind) {
3085  case NoLanes:
3086    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3087                                                    (Spacing == 2), S, E));
3088    break;
3089  case AllLanes:
3090    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3091                                                            (Spacing == 2),
3092                                                            S, E));
3093    break;
3094  case IndexedLane:
3095    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3096                                                           LaneIndex,
3097                                                           (Spacing == 2),
3098                                                           S, E));
3099    break;
3100  }
3101  return MatchOperand_Success;
3102}
3103
3104/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3105ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3106parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3107  SMLoc S = Parser.getTok().getLoc();
3108  const AsmToken &Tok = Parser.getTok();
3109  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3110  StringRef OptStr = Tok.getString();
3111
3112  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3113    .Case("sy",    ARM_MB::SY)
3114    .Case("st",    ARM_MB::ST)
3115    .Case("sh",    ARM_MB::ISH)
3116    .Case("ish",   ARM_MB::ISH)
3117    .Case("shst",  ARM_MB::ISHST)
3118    .Case("ishst", ARM_MB::ISHST)
3119    .Case("nsh",   ARM_MB::NSH)
3120    .Case("un",    ARM_MB::NSH)
3121    .Case("nshst", ARM_MB::NSHST)
3122    .Case("unst",  ARM_MB::NSHST)
3123    .Case("osh",   ARM_MB::OSH)
3124    .Case("oshst", ARM_MB::OSHST)
3125    .Default(~0U);
3126
3127  if (Opt == ~0U)
3128    return MatchOperand_NoMatch;
3129
3130  Parser.Lex(); // Eat identifier token.
3131  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3132  return MatchOperand_Success;
3133}
3134
3135/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3136ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3137parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3138  SMLoc S = Parser.getTok().getLoc();
3139  const AsmToken &Tok = Parser.getTok();
3140  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3141  StringRef IFlagsStr = Tok.getString();
3142
3143  // An iflags string of "none" is interpreted to mean that none of the AIF
3144  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3145  unsigned IFlags = 0;
3146  if (IFlagsStr != "none") {
3147        for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3148      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3149        .Case("a", ARM_PROC::A)
3150        .Case("i", ARM_PROC::I)
3151        .Case("f", ARM_PROC::F)
3152        .Default(~0U);
3153
3154      // If some specific iflag is already set, it means that some letter is
3155      // present more than once, this is not acceptable.
3156      if (Flag == ~0U || (IFlags & Flag))
3157        return MatchOperand_NoMatch;
3158
3159      IFlags |= Flag;
3160    }
3161  }
3162
3163  Parser.Lex(); // Eat identifier token.
3164  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3165  return MatchOperand_Success;
3166}
3167
3168/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3169ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3170parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3171  SMLoc S = Parser.getTok().getLoc();
3172  const AsmToken &Tok = Parser.getTok();
3173  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3174  StringRef Mask = Tok.getString();
3175
3176  if (isMClass()) {
3177    // See ARMv6-M 10.1.1
3178    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3179      .Case("apsr", 0)
3180      .Case("iapsr", 1)
3181      .Case("eapsr", 2)
3182      .Case("xpsr", 3)
3183      .Case("ipsr", 5)
3184      .Case("epsr", 6)
3185      .Case("iepsr", 7)
3186      .Case("msp", 8)
3187      .Case("psp", 9)
3188      .Case("primask", 16)
3189      .Case("basepri", 17)
3190      .Case("basepri_max", 18)
3191      .Case("faultmask", 19)
3192      .Case("control", 20)
3193      .Default(~0U);
3194
3195    if (FlagsVal == ~0U)
3196      return MatchOperand_NoMatch;
3197
3198    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3199      // basepri, basepri_max and faultmask only valid for V7m.
3200      return MatchOperand_NoMatch;
3201
3202    Parser.Lex(); // Eat identifier token.
3203    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3204    return MatchOperand_Success;
3205  }
3206
3207  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3208  size_t Start = 0, Next = Mask.find('_');
3209  StringRef Flags = "";
3210  std::string SpecReg = Mask.slice(Start, Next).lower();
3211  if (Next != StringRef::npos)
3212    Flags = Mask.slice(Next+1, Mask.size());
3213
3214  // FlagsVal contains the complete mask:
3215  // 3-0: Mask
3216  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3217  unsigned FlagsVal = 0;
3218
3219  if (SpecReg == "apsr") {
3220    FlagsVal = StringSwitch<unsigned>(Flags)
3221    .Case("nzcvq",  0x8) // same as CPSR_f
3222    .Case("g",      0x4) // same as CPSR_s
3223    .Case("nzcvqg", 0xc) // same as CPSR_fs
3224    .Default(~0U);
3225
3226    if (FlagsVal == ~0U) {
3227      if (!Flags.empty())
3228        return MatchOperand_NoMatch;
3229      else
3230        FlagsVal = 8; // No flag
3231    }
3232  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3233    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3234      Flags = "fc";
3235    for (int i = 0, e = Flags.size(); i != e; ++i) {
3236      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3237      .Case("c", 1)
3238      .Case("x", 2)
3239      .Case("s", 4)
3240      .Case("f", 8)
3241      .Default(~0U);
3242
3243      // If some specific flag is already set, it means that some letter is
3244      // present more than once, this is not acceptable.
3245      if (FlagsVal == ~0U || (FlagsVal & Flag))
3246        return MatchOperand_NoMatch;
3247      FlagsVal |= Flag;
3248    }
3249  } else // No match for special register.
3250    return MatchOperand_NoMatch;
3251
3252  // Special register without flags is NOT equivalent to "fc" flags.
3253  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3254  // two lines would enable gas compatibility at the expense of breaking
3255  // round-tripping.
3256  //
3257  // if (!FlagsVal)
3258  //  FlagsVal = 0x9;
3259
3260  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3261  if (SpecReg == "spsr")
3262    FlagsVal |= 16;
3263
3264  Parser.Lex(); // Eat identifier token.
3265  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3266  return MatchOperand_Success;
3267}
3268
3269ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3270parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3271            int Low, int High) {
3272  const AsmToken &Tok = Parser.getTok();
3273  if (Tok.isNot(AsmToken::Identifier)) {
3274    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3275    return MatchOperand_ParseFail;
3276  }
3277  StringRef ShiftName = Tok.getString();
3278  std::string LowerOp = Op.lower();
3279  std::string UpperOp = Op.upper();
3280  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3281    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3282    return MatchOperand_ParseFail;
3283  }
3284  Parser.Lex(); // Eat shift type token.
3285
3286  // There must be a '#' and a shift amount.
3287  if (Parser.getTok().isNot(AsmToken::Hash) &&
3288      Parser.getTok().isNot(AsmToken::Dollar)) {
3289    Error(Parser.getTok().getLoc(), "'#' expected");
3290    return MatchOperand_ParseFail;
3291  }
3292  Parser.Lex(); // Eat hash token.
3293
3294  const MCExpr *ShiftAmount;
3295  SMLoc Loc = Parser.getTok().getLoc();
3296  if (getParser().ParseExpression(ShiftAmount)) {
3297    Error(Loc, "illegal expression");
3298    return MatchOperand_ParseFail;
3299  }
3300  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3301  if (!CE) {
3302    Error(Loc, "constant expression expected");
3303    return MatchOperand_ParseFail;
3304  }
3305  int Val = CE->getValue();
3306  if (Val < Low || Val > High) {
3307    Error(Loc, "immediate value out of range");
3308    return MatchOperand_ParseFail;
3309  }
3310
3311  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3312
3313  return MatchOperand_Success;
3314}
3315
3316ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3317parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3318  const AsmToken &Tok = Parser.getTok();
3319  SMLoc S = Tok.getLoc();
3320  if (Tok.isNot(AsmToken::Identifier)) {
3321    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3322    return MatchOperand_ParseFail;
3323  }
3324  int Val = StringSwitch<int>(Tok.getString())
3325    .Case("be", 1)
3326    .Case("le", 0)
3327    .Default(-1);
3328  Parser.Lex(); // Eat the token.
3329
3330  if (Val == -1) {
3331    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3332    return MatchOperand_ParseFail;
3333  }
3334  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3335                                                                  getContext()),
3336                                           S, Parser.getTok().getLoc()));
3337  return MatchOperand_Success;
3338}
3339
3340/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3341/// instructions. Legal values are:
3342///     lsl #n  'n' in [0,31]
3343///     asr #n  'n' in [1,32]
3344///             n == 32 encoded as n == 0.
3345ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3346parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3347  const AsmToken &Tok = Parser.getTok();
3348  SMLoc S = Tok.getLoc();
3349  if (Tok.isNot(AsmToken::Identifier)) {
3350    Error(S, "shift operator 'asr' or 'lsl' expected");
3351    return MatchOperand_ParseFail;
3352  }
3353  StringRef ShiftName = Tok.getString();
3354  bool isASR;
3355  if (ShiftName == "lsl" || ShiftName == "LSL")
3356    isASR = false;
3357  else if (ShiftName == "asr" || ShiftName == "ASR")
3358    isASR = true;
3359  else {
3360    Error(S, "shift operator 'asr' or 'lsl' expected");
3361    return MatchOperand_ParseFail;
3362  }
3363  Parser.Lex(); // Eat the operator.
3364
3365  // A '#' and a shift amount.
3366  if (Parser.getTok().isNot(AsmToken::Hash) &&
3367      Parser.getTok().isNot(AsmToken::Dollar)) {
3368    Error(Parser.getTok().getLoc(), "'#' expected");
3369    return MatchOperand_ParseFail;
3370  }
3371  Parser.Lex(); // Eat hash token.
3372
3373  const MCExpr *ShiftAmount;
3374  SMLoc E = Parser.getTok().getLoc();
3375  if (getParser().ParseExpression(ShiftAmount)) {
3376    Error(E, "malformed shift expression");
3377    return MatchOperand_ParseFail;
3378  }
3379  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3380  if (!CE) {
3381    Error(E, "shift amount must be an immediate");
3382    return MatchOperand_ParseFail;
3383  }
3384
3385  int64_t Val = CE->getValue();
3386  if (isASR) {
3387    // Shift amount must be in [1,32]
3388    if (Val < 1 || Val > 32) {
3389      Error(E, "'asr' shift amount must be in range [1,32]");
3390      return MatchOperand_ParseFail;
3391    }
3392    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3393    if (isThumb() && Val == 32) {
3394      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3395      return MatchOperand_ParseFail;
3396    }
3397    if (Val == 32) Val = 0;
3398  } else {
3399    // Shift amount must be in [1,32]
3400    if (Val < 0 || Val > 31) {
3401      Error(E, "'lsr' shift amount must be in range [0,31]");
3402      return MatchOperand_ParseFail;
3403    }
3404  }
3405
3406  E = Parser.getTok().getLoc();
3407  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3408
3409  return MatchOperand_Success;
3410}
3411
3412/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3413/// of instructions. Legal values are:
3414///     ror #n  'n' in {0, 8, 16, 24}
3415ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3416parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3417  const AsmToken &Tok = Parser.getTok();
3418  SMLoc S = Tok.getLoc();
3419  if (Tok.isNot(AsmToken::Identifier))
3420    return MatchOperand_NoMatch;
3421  StringRef ShiftName = Tok.getString();
3422  if (ShiftName != "ror" && ShiftName != "ROR")
3423    return MatchOperand_NoMatch;
3424  Parser.Lex(); // Eat the operator.
3425
3426  // A '#' and a rotate amount.
3427  if (Parser.getTok().isNot(AsmToken::Hash) &&
3428      Parser.getTok().isNot(AsmToken::Dollar)) {
3429    Error(Parser.getTok().getLoc(), "'#' expected");
3430    return MatchOperand_ParseFail;
3431  }
3432  Parser.Lex(); // Eat hash token.
3433
3434  const MCExpr *ShiftAmount;
3435  SMLoc E = Parser.getTok().getLoc();
3436  if (getParser().ParseExpression(ShiftAmount)) {
3437    Error(E, "malformed rotate expression");
3438    return MatchOperand_ParseFail;
3439  }
3440  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3441  if (!CE) {
3442    Error(E, "rotate amount must be an immediate");
3443    return MatchOperand_ParseFail;
3444  }
3445
3446  int64_t Val = CE->getValue();
3447  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
3448  // normally, zero is represented in asm by omitting the rotate operand
3449  // entirely.
3450  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3451    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3452    return MatchOperand_ParseFail;
3453  }
3454
3455  E = Parser.getTok().getLoc();
3456  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3457
3458  return MatchOperand_Success;
3459}
3460
/// parseBitfield - Parse the two-immediate bitfield descriptor used by the
/// bitfield insert/clear instructions: '#' lsb ',' '#' width, with lsb in
/// [0,31] and width in [1,32-lsb]. Pushes a single combined bitfield operand
/// on success.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat the comma.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  if (getParser().ParseExpression(WidthExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));

  return MatchOperand_Success;
}
3528
3529ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3530parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3531  // Check for a post-index addressing register operand. Specifically:
3532  // postidx_reg := '+' register {, shift}
3533  //              | '-' register {, shift}
3534  //              | register {, shift}
3535
3536  // This method must return MatchOperand_NoMatch without consuming any tokens
3537  // in the case where there is no match, as other alternatives take other
3538  // parse methods.
3539  AsmToken Tok = Parser.getTok();
3540  SMLoc S = Tok.getLoc();
3541  bool haveEaten = false;
3542  bool isAdd = true;
3543  int Reg = -1;
3544  if (Tok.is(AsmToken::Plus)) {
3545    Parser.Lex(); // Eat the '+' token.
3546    haveEaten = true;
3547  } else if (Tok.is(AsmToken::Minus)) {
3548    Parser.Lex(); // Eat the '-' token.
3549    isAdd = false;
3550    haveEaten = true;
3551  }
3552  if (Parser.getTok().is(AsmToken::Identifier))
3553    Reg = tryParseRegister();
3554  if (Reg == -1) {
3555    if (!haveEaten)
3556      return MatchOperand_NoMatch;
3557    Error(Parser.getTok().getLoc(), "register expected");
3558    return MatchOperand_ParseFail;
3559  }
3560  SMLoc E = Parser.getTok().getLoc();
3561
3562  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3563  unsigned ShiftImm = 0;
3564  if (Parser.getTok().is(AsmToken::Comma)) {
3565    Parser.Lex(); // Eat the ','.
3566    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3567      return MatchOperand_ParseFail;
3568  }
3569
3570  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3571                                                  ShiftImm, S, E));
3572
3573  return MatchOperand_Success;
3574}
3575
/// parseAM3Offset - Parse the offset portion of an addressing-mode-3
/// post-indexed operand: an optionally-signed register, or '#' followed by
/// an optionally-signed immediate. Negative zero ("#-0") is preserved by
/// encoding it as INT32_MIN.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat the '#'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // NOTE(review): E is taken from the token captured before the expression
    // was parsed, so it is not the end location of the immediate — confirm
    // this is intentional.
    SMLoc E = Tok.getLoc();
    // Negative zero is encoded as the flag value INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = INT32_MIN;

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }


  // Otherwise expect an optionally-signed register.
  bool haveEaten = false;
  bool isAdd = true;
  int Reg = -1;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    // Once a sign has been consumed we cannot report a clean no-match.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, E));

  return MatchOperand_Success;
}
3646
/// cvtT2LdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2]/[3] = Rt/Rt2,
/// Operands[4] = the imm8s4-offset memory operand.
bool ARMAsmParser::
cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  // NOTE(review): this uses CreateReg(0) while the sibling cvt* routines use
  // CreateImm(0) for the same kind of placeholder — confirm which form the
  // matcher expects.
  Inst.addOperand(MCOperand::CreateReg(0));
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3664
/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2]/[3] = Rt/Rt2,
/// Operands[4] = the imm8s4-offset memory operand. The writeback placeholder
/// goes first, before the source registers.
bool ARMAsmParser::
cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateReg(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3682
/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt,
/// Operands[3] = the imm8-offset memory operand.
bool ARMAsmParser::
cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3698
/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt,
/// Operands[3] = the imm8-offset memory operand; the writeback placeholder
/// goes first for the store form.
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3712
/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt,
/// Operands[3] = the addrmode2 memory operand.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr (addrmode2 expands to three MC operands)
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3728
/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt,
/// Operands[3] = the imm12-offset memory operand.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3744
3745
/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt,
/// Operands[3] = the imm12-offset memory operand; writeback placeholder first.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3759
/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt,
/// Operands[3] = the addrmode2 memory operand; writeback placeholder first.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (addrmode2 expands to three MC operands)
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3773
/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt,
/// Operands[3] = the addrmode3 memory operand; writeback placeholder first.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (addrmode3 expands to three MC operands)
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3787
/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt, Operands[3] = base
/// address (no offset), Operands[4] = post-index imm8 offset.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3806
/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt, Operands[3] = base
/// address (no offset), Operands[4] = post-index register offset.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset (post-index register expands to two MC operands)
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3825
/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt, Operands[3] = base
/// address (no offset), Operands[4] = post-index imm8 offset; writeback
/// placeholder first for the store form.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3844
/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt, Operands[3] = base
/// address (no offset), Operands[4] = post-index register offset; writeback
/// placeholder first for the store form.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset (post-index register expands to two MC operands)
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3863
/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2]/[3] = Rt/Rt2,
/// Operands[4] = the addrmode3 memory operand.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (addrmode3 expands to three MC operands)
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3881
/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2]/[3] = Rt/Rt2,
/// Operands[4] = the addrmode3 memory operand; writeback placeholder first.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr (addrmode3 expands to three MC operands)
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3899
/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = predicate, Operands[2] = Rt,
/// Operands[3] = the addrmode3 memory operand.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (addrmode3 expands to three MC operands)
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3913
/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Layout: Operands[1] = optional cc_out, Operands[2] = predicate,
/// Operands[3] = Rd, Operands[4] (and [5] for the three-operand form) =
/// source registers. Returns false on a diagnosed constraint violation,
/// true otherwise.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Re-add MC operand 0 (Rd, added above) as the tied second source.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}
3946
/// cvtVLDwbFixed - Convert parsed operands to MCInst for a VLD with
/// fixed (implicit) writeback.
/// Layout: Operands[1] = predicate, Operands[3] = vector list (Vd),
/// Operands[4] = the aligned-memory operand (Vn).
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3960
/// cvtVLDwbRegister - Convert parsed operands to MCInst for a VLD with
/// register-increment writeback.
/// Layout: Operands[1] = predicate, Operands[3] = vector list (Vd),
/// Operands[4] = the aligned-memory operand (Vn), Operands[5] = Vm.
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3976
/// cvtVSTwbFixed - Convert parsed operands to MCInst for a VST with
/// fixed (implicit) writeback.
/// Layout: Operands[1] = predicate, Operands[3] = vector list (Vt),
/// Operands[4] = the aligned-memory operand (Vn); note the memory operand
/// precedes the vector list for the store form.
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3990
/// cvtVSTwbRegister - Convert parsed operands to MCInst for a VST with
/// register-increment writeback.
/// Layout: Operands[1] = predicate, Operands[3] = vector list (Vt),
/// Operands[4] = the aligned-memory operand (Vn), Operands[5] = Vm; note the
/// memory operand and Vm precede the vector list for the store form.
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
4006
/// parseMemory - Parse an ARM memory operand in one of these forms:
///   [Rn]                           (optionally followed by '!')
///   [Rn, :align]                   (alignment specifier, in bits)
///   [Rn, #imm]                     (immediate offset)
///   [Rn, +/-Rm (, shift #amt)]     (register offset with optional shift)
/// The current token must be '[' when called. Returns false on success
/// (pushing the memory operand, and a "!" token operand if a writeback
/// marker follows), true on error.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  // A base register is mandatory in every form.
  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Form 1: "[Rn]" — base register only, no offset.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // Form 2: "[Rn, :align]" — if we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // The specifier is written in bits; the operand stores it in bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Form 3: "[Rn, #imm]". If we have a '#', it's an immediate offset, else
  // assume it's a register offset. Be friendly and also accept a plain
  // integer (without a leading hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    // Only consume the token when it is the '#'/'$' prefix; a bare integer
    // is left for ParseExpression below.
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN so that "-0" and
    // "+0" remain distinguishable (negative zero selects the subtract
    // encoding).
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Form 4: "[Rn, +/-Rm (, shift)]".
  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
4189
/// parseMemRegOffsetShift - Parse the shift portion of a register-offset
/// memory operand, one of these two forms:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// On success, sets St and Amount and returns false. Returns true on error;
/// note that when the first token is not an identifier it returns true
/// WITHOUT emitting a diagnostic (all other failure paths call Error first).
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  // Only all-lower or all-upper spellings are accepted; 'asl' is an alias
  // for 'lsl'.
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone; every other shift type takes an immediate amount.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount.
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    Amount = Imm;
  }

  return false;
}
4246
/// parseFPImm - Parse a floating point immediate operand for the VMOV
/// family: either a literal FP value ("#1.0", possibly negated) or an
/// already-encoded 8-bit value ("#112"). Returns MatchOperand_NoMatch when
/// the tokens don't look like an FP immediate context, and
/// MatchOperand_ParseFail (with a diagnostic) for a malformed one.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  //
  // NOTE(review): this indexes Operands[2] (the '.fXX' suffix token) without
  // a size check — assumes the mnemonic and data-type suffix have already
  // been pushed; confirm all callers guarantee Operands.size() >= 3.
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit (bit 63 of the IEEE754
    // double bit pattern).
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm returns -1 if the value is not representable as an
    // 8-bit encoded VFP immediate.
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      Error(Loc, "floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    // An integer here is taken as the raw 8-bit encoding, not a value to
    // be encoded.
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}
/// parseOperand - Parse an ARM instruction operand. For now this parses the
/// operand regardless of the mnemonic. Returns false on success (pushing one
/// or more operands), true on error.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  // Dispatch on the first token of the operand.
  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // First try the identifier as a register (possibly with '!' writeback),
    // then as a shifted-register operand.
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    // If this is VMRS, check for the apsr_nzcv operand.
    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
      return false;
    }

    // Fall through (intentionally — no break) for the Identifier case that
    // is not a register or a special name; it's treated as an expression.
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate.
    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
    S = Parser.getTok().getLoc();
    Parser.Lex();
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (CE) {
      // Canonicalize #-0 to INT32_MIN so negative zero stays
      // distinguishable from +0.
      int32_t Val = CE->getValue();
      if (isNegative && Val == 0)
        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
    }
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
    return false;
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                                   getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}
4407
4408// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4409//  :lower16: and :upper16:.
4410bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4411  RefKind = ARMMCExpr::VK_ARM_None;
4412
4413  // :lower16: and :upper16: modifiers
4414  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4415  Parser.Lex(); // Eat ':'
4416
4417  if (getLexer().isNot(AsmToken::Identifier)) {
4418    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4419    return true;
4420  }
4421
4422  StringRef IDVal = Parser.getTok().getIdentifier();
4423  if (IDVal == "lower16") {
4424    RefKind = ARMMCExpr::VK_ARM_LO16;
4425  } else if (IDVal == "upper16") {
4426    RefKind = ARMMCExpr::VK_ARM_HI16;
4427  } else {
4428    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4429    return true;
4430  }
4431  Parser.Lex();
4432
4433  if (getLexer().isNot(AsmToken::Colon)) {
4434    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4435    return true;
4436  }
4437  Parser.Lex(); // Eat the last ':'
4438  return false;
4439}
4440
/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// Returns the canonical (stripped) mnemonic and fills in PredicationCode,
/// CarrySetting, ProcessorIMod (for "cps"), and ITMask (for "it").
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // The last two characters are matched against the condition-code table;
  // NOTE(review): for 1-character mnemonics size()-2 underflows, which
  // appears to rely on StringRef::substr clamping the start index — confirm.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code ("ie"/"id" suffix).
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}
4539
4540/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4541/// inclusion of carry set or predication code operands.
4542//
4543// FIXME: It would be nice to autogen this.
4544void ARMAsmParser::
4545getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4546                      bool &CanAcceptPredicationCode) {
4547  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4548      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4549      Mnemonic == "add" || Mnemonic == "adc" ||
4550      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4551      Mnemonic == "orr" || Mnemonic == "mvn" ||
4552      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4553      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4554      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4555                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4556                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4557    CanAcceptCarrySet = true;
4558  } else
4559    CanAcceptCarrySet = false;
4560
4561  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4562      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4563      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4564      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4565      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4566      (Mnemonic == "clrex" && !isThumb()) ||
4567      (Mnemonic == "nop" && isThumbOne()) ||
4568      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4569        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4570        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4571      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4572       !isThumb()) ||
4573      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4574    CanAcceptPredicationCode = false;
4575  } else
4576    CanAcceptPredicationCode = true;
4577
4578  if (isThumb()) {
4579    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4580        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4581      CanAcceptPredicationCode = false;
4582  }
4583}
4584
/// shouldOmitCCOutOperand - Decide whether the defaulted (zero-register)
/// cc_out operand generated for \p Mnemonic should be removed before
/// matching, because the instruction variant we will actually match does not
/// have one. Returns true if the cc_out operand should be dropped.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs  or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Default: keep the cc_out operand.
  return false;
}
4709
4710static bool isDataTypeToken(StringRef Tok) {
4711  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4712    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4713    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4714    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4715    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4716    Tok == ".f" || Tok == ".d";
4717}
4718
4719// FIXME: This bit should probably be handled via an explicit match class
4720// in the .td files that matches the suffix instead of having it be
4721// a literal string token the way it is now.
4722static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4723  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4724}
4725
4726static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
/// Parse an ARM instruction mnemonic followed by its operands.
///
/// On success, the mnemonic token, any implicit cc_out / condition-code /
/// imod operands, and the explicitly written operands are appended to
/// \p Operands. Returns true on error (a diagnostic has been emitted and
/// the rest of the statement consumed).
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
  // The generic tblgen'erated code does this later, at the start of
  // MatchInstructionImpl(), but that's too late for aliases that include
  // any sort of suffix.
  unsigned AvailableFeatures = getAvailableFeatures();
  applyMnemonicAliases(Name, AvailableFeatures);

  // First check for the ARM-specific .req directive.
  if (Parser.getTok().is(AsmToken::Identifier) &&
      Parser.getTok().getIdentifier() == ".req") {
    parseDirectiveReq(Name, NameLoc);
    // We always return 'error' for this, as we're done with this
    // statement and don't need to match the instruction.
    return true;
  }

  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    // Loc points just past the two-character "it" mnemonic, at the mask.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the mask right-to-left; the trailing '1' marks the mask length.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The condition code appears after the optional 's' suffix, hence the
    // CarrySetting adjustment to the location.
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    // The '.n' (narrow encoding) qualifier is dropped rather than kept as
    // an operand token.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here. Take care not to convert those
  // that should hit the Thumb2 encoding.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0 &&
        (isThumbOne() ||
         // The cc_out operand matches the IT block.
         ((inITBlock() != CarrySetting) &&
         // Neither register operand is a high register.
         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}
4961
4962// Validate context-sensitive operand constraints.
4963
4964// return 'true' if register list contains non-low GPR registers,
4965// 'false' otherwise. If Reg is in the register list or is HiReg, set
4966// 'containsReg' to true.
4967static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4968                                 unsigned HiReg, bool &containsReg) {
4969  containsReg = false;
4970  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4971    unsigned OpReg = Inst.getOperand(i).getReg();
4972    if (OpReg == Reg)
4973      containsReg = true;
4974    // Anything other than a low register isn't legal here.
4975    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4976      return true;
4977  }
4978  return false;
4979}
4980
// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
4983static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4984  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4985    unsigned OpReg = Inst.getOperand(i).getReg();
4986    if (OpReg == Reg)
4987      return true;
4988  }
4989  return false;
4990}
4991
4992// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4993// the ARMInsts array) instead. Getting that here requires awkward
4994// API changes, though. Better way?
4995namespace llvm {
4996extern const MCInstrDesc ARMInsts[];
4997}
// Look up the instruction description for 'Opcode' directly in the ARMInsts
// table declared above.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}
5001
// FIXME: We would really like to be able to tablegen'erate this.
/// Enforce context-sensitive constraints that the table-driven matcher
/// cannot express: IT-block predication rules, sequential register pairs
/// for doubleword loads/stores, bitfield width ranges, and Thumb
/// register-list restrictions. Returns true (after emitting a diagnostic)
/// on error, false if the instruction is acceptable.
///
/// NOTE: as a side effect, this advances the IT-block tracking state
/// (clears ITState.FirstCond) for instructions inside an IT block.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable.  It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    // 'bit' selects between the IT condition and its inverse for this slot.
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1. (Source registers start at operand 1 for these
    // forms; operand 0 is the writeback/status result.)
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}
5151
// Map a VST1LN/VST2LN assembly pseudo-opcode (one per accepted data-type
// suffix) to the real instruction opcode. 'Spacing' is set to the register
// increment between the D registers of the stored structure (1 for
// consecutive D registers, 2 for the Q-register forms); processInstruction()
// uses it to compute the second register operand.
static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
  case ARM::VST1LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
  case ARM::VST1LNdAsm_U8:
    Spacing = 1;
    return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16:
    Spacing = 1;
    return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
    Spacing = 1;
    return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
  case ARM::VST2LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
  case ARM::VST2LNqWB_fixed_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
  case ARM::VST2LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
  case ARM::VST2LNqWB_register_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
  case ARM::VST2LNdAsm_U8:
    Spacing = 1;
    return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16:
    Spacing = 1;
    return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
    Spacing = 1;
    return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
  case ARM::VST2LNqAsm_U16:
    Spacing = 2;
    return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
    Spacing = 2;
    return ARM::VST2LNq32;
  }
}
5282
5283static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
5284  switch(Opc) {
5285  default: assert(0 && "unexpected opcode!");
5286  // VLD1LN
5287  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5288  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5289  case ARM::VLD1LNdWB_fixed_Asm_U8:
5290    Spacing = 1;
5291    return ARM::VLD1LNd8_UPD;
5292  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5293  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5294  case ARM::VLD1LNdWB_fixed_Asm_U16:
5295    Spacing = 1;
5296    return ARM::VLD1LNd16_UPD;
5297  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5298  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5299  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5300    Spacing = 1;
5301    return ARM::VLD1LNd32_UPD;
5302  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5303  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5304  case ARM::VLD1LNdWB_register_Asm_U8:
5305    Spacing = 1;
5306    return ARM::VLD1LNd8_UPD;
5307  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5308  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5309  case ARM::VLD1LNdWB_register_Asm_U16:
5310    Spacing = 1;
5311    return ARM::VLD1LNd16_UPD;
5312  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5313  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5314  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5315    Spacing = 1;
5316    return ARM::VLD1LNd32_UPD;
5317  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5318  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5319  case ARM::VLD1LNdAsm_U8:
5320    Spacing = 1;
5321    return ARM::VLD1LNd8;
5322  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5323  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5324  case ARM::VLD1LNdAsm_U16:
5325    Spacing = 1;
5326    return ARM::VLD1LNd16;
5327  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5328  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5329  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5330    Spacing = 1;
5331    return ARM::VLD1LNd32;
5332
5333  // VLD2LN
5334  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5335  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5336  case ARM::VLD2LNdWB_fixed_Asm_U8:
5337    Spacing = 1;
5338    return ARM::VLD2LNd8_UPD;
5339  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5340  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5341  case ARM::VLD2LNdWB_fixed_Asm_U16:
5342    Spacing = 1;
5343    return ARM::VLD2LNd16_UPD;
5344  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5345  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5346  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5347    Spacing = 1;
5348    return ARM::VLD2LNd32_UPD;
5349  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5350  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5351  case ARM::VLD2LNqWB_fixed_Asm_U16:
5352    Spacing = 1;
5353    return ARM::VLD2LNq16_UPD;
5354  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
5355  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
5356  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
5357    Spacing = 2;
5358    return ARM::VLD2LNq32_UPD;
5359  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5360  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5361  case ARM::VLD2LNdWB_register_Asm_U8:
5362    Spacing = 1;
5363    return ARM::VLD2LNd8_UPD;
5364  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5365  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5366  case ARM::VLD2LNdWB_register_Asm_U16:
5367    Spacing = 1;
5368    return ARM::VLD2LNd16_UPD;
5369  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5370  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5371  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5372    Spacing = 1;
5373    return ARM::VLD2LNd32_UPD;
5374  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5375  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5376  case ARM::VLD2LNqWB_register_Asm_U16:
5377    Spacing = 2;
5378    return ARM::VLD2LNq16_UPD;
5379  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
5380  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
5381  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
5382    Spacing = 2;
5383    return ARM::VLD2LNq32_UPD;
5384  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5385  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5386  case ARM::VLD2LNdAsm_U8:
5387    Spacing = 1;
5388    return ARM::VLD2LNd8;
5389  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5390  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5391  case ARM::VLD2LNdAsm_U16:
5392    Spacing = 1;
5393    return ARM::VLD2LNd16;
5394  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5395  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5396  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5397    Spacing = 1;
5398    return ARM::VLD2LNd32;
5399  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5400  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
5401  case ARM::VLD2LNqAsm_U16:
5402    Spacing = 2;
5403    return ARM::VLD2LNq16;
5404  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
5405  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
5406  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
5407    Spacing = 2;
5408    return ARM::VLD2LNq32;
5409  }
5410}
5411
5412bool ARMAsmParser::
5413processInstruction(MCInst &Inst,
5414                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5415  switch (Inst.getOpcode()) {
5416  // Aliases for alternate PC+imm syntax of LDR instructions.
5417  case ARM::t2LDRpcrel:
5418    Inst.setOpcode(ARM::t2LDRpci);
5419    return true;
5420  case ARM::t2LDRBpcrel:
5421    Inst.setOpcode(ARM::t2LDRBpci);
5422    return true;
5423  case ARM::t2LDRHpcrel:
5424    Inst.setOpcode(ARM::t2LDRHpci);
5425    return true;
5426  case ARM::t2LDRSBpcrel:
5427    Inst.setOpcode(ARM::t2LDRSBpci);
5428    return true;
5429  case ARM::t2LDRSHpcrel:
5430    Inst.setOpcode(ARM::t2LDRSHpci);
5431    return true;
5432  // Handle NEON VST complex aliases.
5433  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5434  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5435  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5436  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5437  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5438  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5439  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5440  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5441    MCInst TmpInst;
5442    // Shuffle the operands around so the lane index operand is in the
5443    // right place.
5444    unsigned Spacing;
5445    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5446    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5447    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5448    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5449    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5450    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5451    TmpInst.addOperand(Inst.getOperand(1)); // lane
5452    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5453    TmpInst.addOperand(Inst.getOperand(6));
5454    Inst = TmpInst;
5455    return true;
5456  }
5457
5458  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
5459  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
5460  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
5461  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5462  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5463  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5464  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5465  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5466  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5467  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5468  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
5469  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
5470  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
5471  case ARM::VST2LNqWB_register_Asm_U32: {
5472    MCInst TmpInst;
5473    // Shuffle the operands around so the lane index operand is in the
5474    // right place.
5475    unsigned Spacing;
5476    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5477    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5478    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5479    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5480    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5481    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5482    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5483                                            Spacing));
5484    TmpInst.addOperand(Inst.getOperand(1)); // lane
5485    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5486    TmpInst.addOperand(Inst.getOperand(6));
5487    Inst = TmpInst;
5488    return true;
5489  }
5490  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5491  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5492  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5493  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5494  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5495  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5496  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5497  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5498    MCInst TmpInst;
5499    // Shuffle the operands around so the lane index operand is in the
5500    // right place.
5501    unsigned Spacing;
5502    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5503    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5504    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5505    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5506    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5507    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5508    TmpInst.addOperand(Inst.getOperand(1)); // lane
5509    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5510    TmpInst.addOperand(Inst.getOperand(5));
5511    Inst = TmpInst;
5512    return true;
5513  }
5514
5515  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
5516  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
5517  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
5518  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5519  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5520  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5521  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5522  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5523  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5524  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5525  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
5526  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
5527  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
5528  case ARM::VST2LNqWB_fixed_Asm_U32: {
5529    MCInst TmpInst;
5530    // Shuffle the operands around so the lane index operand is in the
5531    // right place.
5532    unsigned Spacing;
5533    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5534    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5535    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5536    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5537    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5538    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5539    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5540                                            Spacing));
5541    TmpInst.addOperand(Inst.getOperand(1)); // lane
5542    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5543    TmpInst.addOperand(Inst.getOperand(5));
5544    Inst = TmpInst;
5545    return true;
5546  }
5547  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5548  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5549  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5550  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5551  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5552  case ARM::VST1LNdAsm_U32: {
5553    MCInst TmpInst;
5554    // Shuffle the operands around so the lane index operand is in the
5555    // right place.
5556    unsigned Spacing;
5557    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5558    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5559    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5560    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5561    TmpInst.addOperand(Inst.getOperand(1)); // lane
5562    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5563    TmpInst.addOperand(Inst.getOperand(5));
5564    Inst = TmpInst;
5565    return true;
5566  }
5567
5568  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
5569  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
5570  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5571  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5572  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5573  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5574  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
5575  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
5576  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
5577    MCInst TmpInst;
5578    // Shuffle the operands around so the lane index operand is in the
5579    // right place.
5580    unsigned Spacing;
5581    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5582    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5583    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5584    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5585    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5586                                            Spacing));
5587    TmpInst.addOperand(Inst.getOperand(1)); // lane
5588    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5589    TmpInst.addOperand(Inst.getOperand(5));
5590    Inst = TmpInst;
5591    return true;
5592  }
5593  // Handle NEON VLD complex aliases.
5594  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5595  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5596  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5597  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5598  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5599  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5600  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5601  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5602    MCInst TmpInst;
5603    // Shuffle the operands around so the lane index operand is in the
5604    // right place.
5605    unsigned Spacing;
5606    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5607    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5608    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5609    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5610    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5611    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5612    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5613    TmpInst.addOperand(Inst.getOperand(1)); // lane
5614    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5615    TmpInst.addOperand(Inst.getOperand(6));
5616    Inst = TmpInst;
5617    return true;
5618  }
5619
5620  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
5621  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
5622  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
5623  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5624  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5625  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5626  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5627  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5628  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5629  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5630  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
5631  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
5632  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
5633  case ARM::VLD2LNqWB_register_Asm_U32: {
5634    MCInst TmpInst;
5635    // Shuffle the operands around so the lane index operand is in the
5636    // right place.
5637    unsigned Spacing;
5638    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5639    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5640    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5641                                            Spacing));
5642    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5643    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5644    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5645    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5646    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5647    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5648                                            Spacing));
5649    TmpInst.addOperand(Inst.getOperand(1)); // lane
5650    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5651    TmpInst.addOperand(Inst.getOperand(6));
5652    Inst = TmpInst;
5653    return true;
5654  }
5655
5656  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5657  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5658  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5659  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5660  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5661  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5662  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5663  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5664    MCInst TmpInst;
5665    // Shuffle the operands around so the lane index operand is in the
5666    // right place.
5667    unsigned Spacing;
5668    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5669    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5670    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5671    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5672    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5673    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5674    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5675    TmpInst.addOperand(Inst.getOperand(1)); // lane
5676    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5677    TmpInst.addOperand(Inst.getOperand(5));
5678    Inst = TmpInst;
5679    return true;
5680  }
5681
5682  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
5683  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
5684  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
5685  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5686  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5687  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5688  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5689  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5690  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5691  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5692  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
5693  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
5694  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
5695  case ARM::VLD2LNqWB_fixed_Asm_U32: {
5696    MCInst TmpInst;
5697    // Shuffle the operands around so the lane index operand is in the
5698    // right place.
5699    unsigned Spacing;
5700    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5701    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5702    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5703                                            Spacing));
5704    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5705    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5706    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5707    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5708    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5709    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5710                                            Spacing));
5711    TmpInst.addOperand(Inst.getOperand(1)); // lane
5712    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5713    TmpInst.addOperand(Inst.getOperand(5));
5714    Inst = TmpInst;
5715    return true;
5716  }
5717
5718  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
5719  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
5720  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5721  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5722  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5723  case ARM::VLD1LNdAsm_U32: {
5724    MCInst TmpInst;
5725    // Shuffle the operands around so the lane index operand is in the
5726    // right place.
5727    unsigned Spacing;
5728    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5729    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5730    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5731    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5732    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5733    TmpInst.addOperand(Inst.getOperand(1)); // lane
5734    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5735    TmpInst.addOperand(Inst.getOperand(5));
5736    Inst = TmpInst;
5737    return true;
5738  }
5739
5740  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
5741  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
5742  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5743  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5744  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5745  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5746  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
5747  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
5748  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
5749  case ARM::VLD2LNqAsm_U32: {
5750    MCInst TmpInst;
5751    // Shuffle the operands around so the lane index operand is in the
5752    // right place.
5753    unsigned Spacing;
5754    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5755    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5756    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5757                                            Spacing));
5758    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5759    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5760    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5761    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5762                                            Spacing));
5763    TmpInst.addOperand(Inst.getOperand(1)); // lane
5764    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5765    TmpInst.addOperand(Inst.getOperand(5));
5766    Inst = TmpInst;
5767    return true;
5768  }
5769  // Handle the Thumb2 mode MOV complex aliases.
5770  case ARM::t2MOVsr:
5771  case ARM::t2MOVSsr: {
5772    // Which instruction to expand to depends on the CCOut operand and
5773    // whether we're in an IT block if the register operands are low
5774    // registers.
5775    bool isNarrow = false;
5776    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5777        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5778        isARMLowRegister(Inst.getOperand(2).getReg()) &&
5779        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
5780        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
5781      isNarrow = true;
5782    MCInst TmpInst;
5783    unsigned newOpc;
5784    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
5785    default: llvm_unreachable("unexpected opcode!");
5786    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
5787    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
5788    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
5789    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
5790    }
5791    TmpInst.setOpcode(newOpc);
5792    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5793    if (isNarrow)
5794      TmpInst.addOperand(MCOperand::CreateReg(
5795          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5796    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5797    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5798    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5799    TmpInst.addOperand(Inst.getOperand(5));
5800    if (!isNarrow)
5801      TmpInst.addOperand(MCOperand::CreateReg(
5802          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5803    Inst = TmpInst;
5804    return true;
5805  }
5806  case ARM::t2MOVsi:
5807  case ARM::t2MOVSsi: {
5808    // Which instruction to expand to depends on the CCOut operand and
5809    // whether we're in an IT block if the register operands are low
5810    // registers.
5811    bool isNarrow = false;
5812    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5813        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5814        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5815      isNarrow = true;
5816    MCInst TmpInst;
5817    unsigned newOpc;
5818    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5819    default: llvm_unreachable("unexpected opcode!");
5820    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5821    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5822    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5823    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5824    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
5825    }
5826    unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5827    if (Ammount == 32) Ammount = 0;
5828    TmpInst.setOpcode(newOpc);
5829    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5830    if (isNarrow)
5831      TmpInst.addOperand(MCOperand::CreateReg(
5832          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5833    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5834    if (newOpc != ARM::t2RRX)
5835      TmpInst.addOperand(MCOperand::CreateImm(Ammount));
5836    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5837    TmpInst.addOperand(Inst.getOperand(4));
5838    if (!isNarrow)
5839      TmpInst.addOperand(MCOperand::CreateReg(
5840          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5841    Inst = TmpInst;
5842    return true;
5843  }
5844  // Handle the ARM mode MOV complex aliases.
5845  case ARM::ASRr:
5846  case ARM::LSRr:
5847  case ARM::LSLr:
5848  case ARM::RORr: {
5849    ARM_AM::ShiftOpc ShiftTy;
5850    switch(Inst.getOpcode()) {
5851    default: llvm_unreachable("unexpected opcode!");
5852    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5853    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5854    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5855    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5856    }
5857    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5858    MCInst TmpInst;
5859    TmpInst.setOpcode(ARM::MOVsr);
5860    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5861    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5862    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5863    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5864    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5865    TmpInst.addOperand(Inst.getOperand(4));
5866    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5867    Inst = TmpInst;
5868    return true;
5869  }
5870  case ARM::ASRi:
5871  case ARM::LSRi:
5872  case ARM::LSLi:
5873  case ARM::RORi: {
5874    ARM_AM::ShiftOpc ShiftTy;
5875    switch(Inst.getOpcode()) {
5876    default: llvm_unreachable("unexpected opcode!");
5877    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5878    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5879    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5880    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5881    }
5882    // A shift by zero is a plain MOVr, not a MOVsi.
5883    unsigned Amt = Inst.getOperand(2).getImm();
5884    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5885    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5886    MCInst TmpInst;
5887    TmpInst.setOpcode(Opc);
5888    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5889    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5890    if (Opc == ARM::MOVsi)
5891      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5892    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5893    TmpInst.addOperand(Inst.getOperand(4));
5894    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5895    Inst = TmpInst;
5896    return true;
5897  }
5898  case ARM::RRXi: {
5899    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5900    MCInst TmpInst;
5901    TmpInst.setOpcode(ARM::MOVsi);
5902    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5903    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5904    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5905    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5906    TmpInst.addOperand(Inst.getOperand(3));
5907    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5908    Inst = TmpInst;
5909    return true;
5910  }
5911  case ARM::t2LDMIA_UPD: {
5912    // If this is a load of a single register, then we should use
5913    // a post-indexed LDR instruction instead, per the ARM ARM.
5914    if (Inst.getNumOperands() != 5)
5915      return false;
5916    MCInst TmpInst;
5917    TmpInst.setOpcode(ARM::t2LDR_POST);
5918    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5919    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5920    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5921    TmpInst.addOperand(MCOperand::CreateImm(4));
5922    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5923    TmpInst.addOperand(Inst.getOperand(3));
5924    Inst = TmpInst;
5925    return true;
5926  }
5927  case ARM::t2STMDB_UPD: {
5928    // If this is a store of a single register, then we should use
5929    // a pre-indexed STR instruction instead, per the ARM ARM.
5930    if (Inst.getNumOperands() != 5)
5931      return false;
5932    MCInst TmpInst;
5933    TmpInst.setOpcode(ARM::t2STR_PRE);
5934    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5935    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5936    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5937    TmpInst.addOperand(MCOperand::CreateImm(-4));
5938    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5939    TmpInst.addOperand(Inst.getOperand(3));
5940    Inst = TmpInst;
5941    return true;
5942  }
5943  case ARM::LDMIA_UPD:
5944    // If this is a load of a single register via a 'pop', then we should use
5945    // a post-indexed LDR instruction instead, per the ARM ARM.
5946    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5947        Inst.getNumOperands() == 5) {
5948      MCInst TmpInst;
5949      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5950      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5951      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5952      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5953      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5954      TmpInst.addOperand(MCOperand::CreateImm(4));
5955      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5956      TmpInst.addOperand(Inst.getOperand(3));
5957      Inst = TmpInst;
5958      return true;
5959    }
5960    break;
5961  case ARM::STMDB_UPD:
5962    // If this is a store of a single register via a 'push', then we should use
5963    // a pre-indexed STR instruction instead, per the ARM ARM.
5964    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5965        Inst.getNumOperands() == 5) {
5966      MCInst TmpInst;
5967      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5968      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5969      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5970      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5971      TmpInst.addOperand(MCOperand::CreateImm(-4));
5972      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5973      TmpInst.addOperand(Inst.getOperand(3));
5974      Inst = TmpInst;
5975    }
5976    break;
5977  case ARM::t2ADDri12:
5978    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5979    // mnemonic was used (not "addw"), encoding T3 is preferred.
5980    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5981        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5982      break;
5983    Inst.setOpcode(ARM::t2ADDri);
5984    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5985    break;
5986  case ARM::t2SUBri12:
5987    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5988    // mnemonic was used (not "subw"), encoding T3 is preferred.
5989    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5990        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5991      break;
5992    Inst.setOpcode(ARM::t2SUBri);
5993    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5994    break;
5995  case ARM::tADDi8:
5996    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5997    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5998    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5999    // to encoding T1 if <Rd> is omitted."
6000    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6001      Inst.setOpcode(ARM::tADDi3);
6002      return true;
6003    }
6004    break;
6005  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6007    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6008    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6009    // to encoding T1 if <Rd> is omitted."
6010    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6011      Inst.setOpcode(ARM::tSUBi3);
6012      return true;
6013    }
6014    break;
6015  case ARM::t2ADDrr: {
6016    // If the destination and first source operand are the same, and
6017    // there's no setting of the flags, use encoding T2 instead of T3.
6018    // Note that this is only for ADD, not SUB. This mirrors the system
6019    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
6020    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6021        Inst.getOperand(5).getReg() != 0 ||
6022        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6023         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6024      break;
6025    MCInst TmpInst;
6026    TmpInst.setOpcode(ARM::tADDhirr);
6027    TmpInst.addOperand(Inst.getOperand(0));
6028    TmpInst.addOperand(Inst.getOperand(0));
6029    TmpInst.addOperand(Inst.getOperand(2));
6030    TmpInst.addOperand(Inst.getOperand(3));
6031    TmpInst.addOperand(Inst.getOperand(4));
6032    Inst = TmpInst;
6033    return true;
6034  }
6035  case ARM::tB:
6036    // A Thumb conditional branch outside of an IT block is a tBcc.
6037    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6038      Inst.setOpcode(ARM::tBcc);
6039      return true;
6040    }
6041    break;
6042  case ARM::t2B:
6043    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6044    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6045      Inst.setOpcode(ARM::t2Bcc);
6046      return true;
6047    }
6048    break;
6049  case ARM::t2Bcc:
6050    // If the conditional is AL or we're in an IT block, we really want t2B.
6051    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6052      Inst.setOpcode(ARM::t2B);
6053      return true;
6054    }
6055    break;
6056  case ARM::tBcc:
6057    // If the conditional is AL, we really want tB.
6058    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6059      Inst.setOpcode(ARM::tB);
6060      return true;
6061    }
6062    break;
6063  case ARM::tLDMIA: {
6064    // If the register list contains any high registers, or if the writeback
6065    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6066    // instead if we're in Thumb2. Otherwise, this should have generated
6067    // an error in validateInstruction().
6068    unsigned Rn = Inst.getOperand(0).getReg();
6069    bool hasWritebackToken =
6070      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6071       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6072    bool listContainsBase;
6073    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6074        (!listContainsBase && !hasWritebackToken) ||
6075        (listContainsBase && hasWritebackToken)) {
6076      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6077      assert (isThumbTwo());
6078      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6079      // If we're switching to the updating version, we need to insert
6080      // the writeback tied operand.
6081      if (hasWritebackToken)
6082        Inst.insert(Inst.begin(),
6083                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6084      return true;
6085    }
6086    break;
6087  }
6088  case ARM::tSTMIA_UPD: {
6089    // If the register list contains any high registers, we need to use
6090    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6091    // should have generated an error in validateInstruction().
6092    unsigned Rn = Inst.getOperand(0).getReg();
6093    bool listContainsBase;
6094    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6095      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6096      assert (isThumbTwo());
6097      Inst.setOpcode(ARM::t2STMIA_UPD);
6098      return true;
6099    }
6100    break;
6101  }
6102  case ARM::tPOP: {
6103    bool listContainsBase;
6104    // If the register list contains any high registers, we need to use
6105    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6106    // should have generated an error in validateInstruction().
6107    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6108      return false;
6109    assert (isThumbTwo());
6110    Inst.setOpcode(ARM::t2LDMIA_UPD);
6111    // Add the base register and writeback operands.
6112    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6113    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6114    return true;
6115  }
6116  case ARM::tPUSH: {
6117    bool listContainsBase;
6118    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6119      return false;
6120    assert (isThumbTwo());
6121    Inst.setOpcode(ARM::t2STMDB_UPD);
6122    // Add the base register and writeback operands.
6123    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6124    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6125    return true;
6126  }
6127  case ARM::t2MOVi: {
6128    // If we can use the 16-bit encoding and the user didn't explicitly
6129    // request the 32-bit variant, transform it here.
6130    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6131        Inst.getOperand(1).getImm() <= 255 &&
6132        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6133         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6134        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6135        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6136         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6137      // The operands aren't in the same order for tMOVi8...
6138      MCInst TmpInst;
6139      TmpInst.setOpcode(ARM::tMOVi8);
6140      TmpInst.addOperand(Inst.getOperand(0));
6141      TmpInst.addOperand(Inst.getOperand(4));
6142      TmpInst.addOperand(Inst.getOperand(1));
6143      TmpInst.addOperand(Inst.getOperand(2));
6144      TmpInst.addOperand(Inst.getOperand(3));
6145      Inst = TmpInst;
6146      return true;
6147    }
6148    break;
6149  }
6150  case ARM::t2MOVr: {
6151    // If we can use the 16-bit encoding and the user didn't explicitly
6152    // request the 32-bit variant, transform it here.
6153    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6154        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6155        Inst.getOperand(2).getImm() == ARMCC::AL &&
6156        Inst.getOperand(4).getReg() == ARM::CPSR &&
6157        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6158         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6159      // The operands aren't the same for tMOV[S]r... (no cc_out)
6160      MCInst TmpInst;
6161      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6162      TmpInst.addOperand(Inst.getOperand(0));
6163      TmpInst.addOperand(Inst.getOperand(1));
6164      TmpInst.addOperand(Inst.getOperand(2));
6165      TmpInst.addOperand(Inst.getOperand(3));
6166      Inst = TmpInst;
6167      return true;
6168    }
6169    break;
6170  }
6171  case ARM::t2SXTH:
6172  case ARM::t2SXTB:
6173  case ARM::t2UXTH:
6174  case ARM::t2UXTB: {
6175    // If we can use the 16-bit encoding and the user didn't explicitly
6176    // request the 32-bit variant, transform it here.
6177    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6178        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6179        Inst.getOperand(2).getImm() == 0 &&
6180        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6181         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6182      unsigned NewOpc;
6183      switch (Inst.getOpcode()) {
6184      default: llvm_unreachable("Illegal opcode!");
6185      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6186      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6187      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6188      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6189      }
6190      // The operands aren't the same for thumb1 (no rotate operand).
6191      MCInst TmpInst;
6192      TmpInst.setOpcode(NewOpc);
6193      TmpInst.addOperand(Inst.getOperand(0));
6194      TmpInst.addOperand(Inst.getOperand(1));
6195      TmpInst.addOperand(Inst.getOperand(3));
6196      TmpInst.addOperand(Inst.getOperand(4));
6197      Inst = TmpInst;
6198      return true;
6199    }
6200    break;
6201  }
6202  case ARM::MOVsi: {
6203    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6204    if (SOpc == ARM_AM::rrx) return false;
6205    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6206      // Shifting by zero is accepted as a vanilla 'MOVr'
6207      MCInst TmpInst;
6208      TmpInst.setOpcode(ARM::MOVr);
6209      TmpInst.addOperand(Inst.getOperand(0));
6210      TmpInst.addOperand(Inst.getOperand(1));
6211      TmpInst.addOperand(Inst.getOperand(3));
6212      TmpInst.addOperand(Inst.getOperand(4));
6213      TmpInst.addOperand(Inst.getOperand(5));
6214      Inst = TmpInst;
6215      return true;
6216    }
6217    return false;
6218  }
6219  case ARM::ANDrsi:
6220  case ARM::ORRrsi:
6221  case ARM::EORrsi:
6222  case ARM::BICrsi:
6223  case ARM::SUBrsi:
6224  case ARM::ADDrsi: {
6225    unsigned newOpc;
6226    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
6227    if (SOpc == ARM_AM::rrx) return false;
6228    switch (Inst.getOpcode()) {
6229    default: assert(0 && "unexpected opcode!");
6230    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
6231    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
6232    case ARM::EORrsi: newOpc = ARM::EORrr; break;
6233    case ARM::BICrsi: newOpc = ARM::BICrr; break;
6234    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
6235    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
6236    }
6237    // If the shift is by zero, use the non-shifted instruction definition.
6238    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
6239      MCInst TmpInst;
6240      TmpInst.setOpcode(newOpc);
6241      TmpInst.addOperand(Inst.getOperand(0));
6242      TmpInst.addOperand(Inst.getOperand(1));
6243      TmpInst.addOperand(Inst.getOperand(2));
6244      TmpInst.addOperand(Inst.getOperand(4));
6245      TmpInst.addOperand(Inst.getOperand(5));
6246      TmpInst.addOperand(Inst.getOperand(6));
6247      Inst = TmpInst;
6248      return true;
6249    }
6250    return false;
6251  }
6252  case ARM::t2IT: {
6253    // The mask bits for all but the first condition are represented as
6254    // the low bit of the condition code value implies 't'. We currently
6255    // always have 1 implies 't', so XOR toggle the bits if the low bit
6256    // of the condition code is zero. The encoding also expects the low
6257    // bit of the condition to be encoded as bit 4 of the mask operand,
6258    // so mask that in if needed
6259    MCOperand &MO = Inst.getOperand(1);
6260    unsigned Mask = MO.getImm();
6261    unsigned OrigMask = Mask;
6262    unsigned TZ = CountTrailingZeros_32(Mask);
6263    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6264      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6265      for (unsigned i = 3; i != TZ; --i)
6266        Mask ^= 1 << i;
6267    } else
6268      Mask |= 0x10;
6269    MO.setImm(Mask);
6270
6271    // Set up the IT block state according to the IT instruction we just
6272    // matched.
6273    assert(!inITBlock() && "nested IT blocks?!");
6274    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6275    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6276    ITState.CurPosition = 0;
6277    ITState.FirstCond = true;
6278    break;
6279  }
6280  }
6281  return false;
6282}
6283
6284unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6285  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6286  // suffix depending on whether they're in an IT block or not.
6287  unsigned Opc = Inst.getOpcode();
6288  const MCInstrDesc &MCID = getInstDesc(Opc);
6289  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6290    assert(MCID.hasOptionalDef() &&
6291           "optionally flag setting instruction missing optional def operand");
6292    assert(MCID.NumOperands == Inst.getNumOperands() &&
6293           "operand count mismatch!");
6294    // Find the optional-def operand (cc_out).
6295    unsigned OpNo;
6296    for (OpNo = 0;
6297         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
6298         ++OpNo)
6299      ;
6300    // If we're parsing Thumb1, reject it completely.
6301    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6302      return Match_MnemonicFail;
6303    // If we're parsing Thumb2, which form is legal depends on whether we're
6304    // in an IT block.
6305    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6306        !inITBlock())
6307      return Match_RequiresITBlock;
6308    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6309        inITBlock())
6310      return Match_RequiresNotITBlock;
6311  }
6312  // Some high-register supporting Thumb1 encodings only allow both registers
6313  // to be from r0-r7 when in Thumb2.
6314  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6315           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6316           isARMLowRegister(Inst.getOperand(2).getReg()))
6317    return Match_RequiresThumb2;
6318  // Others only require ARMv6 or later.
6319  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6320           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6321           isARMLowRegister(Inst.getOperand(1).getReg()))
6322    return Match_RequiresV6;
6323  return Match_Success;
6324}
6325
/// MatchAndEmitInstruction - Match the parsed Operands against the target
/// instruction tables and, on success, emit the encoded MCInst to \p Out.
/// Returns true on error (a diagnostic has been emitted), false on success.
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                        MCStreamer &Out) {
  MCInst Inst;
  unsigned ErrorInfo;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
  switch (MatchResult) {
  default: break;
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other. E.g.,
    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
    while (processInstruction(Inst, Operands))
      ;

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher
    // identified one; otherwise fall back to the instruction location.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  // Not reached; kept to satisfy compilers that don't honor noreturn.
  return true;
}
6393
6394/// parseDirective parses the arm specific directives
6395bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6396  StringRef IDVal = DirectiveID.getIdentifier();
6397  if (IDVal == ".word")
6398    return parseDirectiveWord(4, DirectiveID.getLoc());
6399  else if (IDVal == ".thumb")
6400    return parseDirectiveThumb(DirectiveID.getLoc());
6401  else if (IDVal == ".arm")
6402    return parseDirectiveARM(DirectiveID.getLoc());
6403  else if (IDVal == ".thumb_func")
6404    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6405  else if (IDVal == ".code")
6406    return parseDirectiveCode(DirectiveID.getLoc());
6407  else if (IDVal == ".syntax")
6408    return parseDirectiveSyntax(DirectiveID.getLoc());
6409  else if (IDVal == ".unreq")
6410    return parseDirectiveUnreq(DirectiveID.getLoc());
6411  else if (IDVal == ".arch")
6412    return parseDirectiveArch(DirectiveID.getLoc());
6413  else if (IDVal == ".eabi_attribute")
6414    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6415  return true;
6416}
6417
6418/// parseDirectiveWord
6419///  ::= .word [ expression (, expression)* ]
6420bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6421  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6422    for (;;) {
6423      const MCExpr *Value;
6424      if (getParser().ParseExpression(Value))
6425        return true;
6426
6427      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6428
6429      if (getLexer().is(AsmToken::EndOfStatement))
6430        break;
6431
6432      // FIXME: Improve diagnostic.
6433      if (getLexer().isNot(AsmToken::Comma))
6434        return Error(L, "unexpected token in directive");
6435      Parser.Lex();
6436    }
6437  }
6438
6439  Parser.Lex();
6440  return false;
6441}
6442
6443/// parseDirectiveThumb
6444///  ::= .thumb
6445bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6446  if (getLexer().isNot(AsmToken::EndOfStatement))
6447    return Error(L, "unexpected token in directive");
6448  Parser.Lex();
6449
6450  if (!isThumb())
6451    SwitchMode();
6452  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6453  return false;
6454}
6455
6456/// parseDirectiveARM
6457///  ::= .arm
6458bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6459  if (getLexer().isNot(AsmToken::EndOfStatement))
6460    return Error(L, "unexpected token in directive");
6461  Parser.Lex();
6462
6463  if (isThumb())
6464    SwitchMode();
6465  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6466  return false;
6467}
6468
/// parseDirectiveThumbFunc
///  ::= .thumbfunc symbol_name
/// Marks a symbol as a Thumb-mode function. On MachO the symbol name may
/// follow the directive on the same line; on ELF it is taken from the
/// following line.
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  // hasSubsectionsViaSymbols() is used here as a proxy for "MachO target".
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;
  bool needFuncName = true;

  // Darwin asm has (optionally) function name after .thumb_func direction
  // ELF doesn't
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::EndOfStatement)) {
      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
        return Error(L, "unexpected token in .thumb_func directive");
      Name = Tok.getIdentifier();
      Parser.Lex(); // Consume the identifier token.
      needFuncName = false;
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");

  // Eat the end of statement and any blank lines that follow.
  while (getLexer().is(AsmToken::EndOfStatement))
    Parser.Lex();

  // FIXME: assuming function name will be the line following .thumb_func
  // We really should be checking the next symbol definition even if there's
  // stuff in between.
  if (needFuncName) {
    // NOTE(review): the token kind is not checked here before calling
    // getIdentifier(); presumably the next token is always an identifier
    // in well-formed input — confirm behavior on malformed input.
    Name = Parser.getTok().getIdentifier();
  }

  // Mark symbol as a thumb symbol.
  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
  getParser().getStreamer().EmitThumbFunc(Func);
  return false;
}
6509
6510/// parseDirectiveSyntax
6511///  ::= .syntax unified | divided
6512bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6513  const AsmToken &Tok = Parser.getTok();
6514  if (Tok.isNot(AsmToken::Identifier))
6515    return Error(L, "unexpected token in .syntax directive");
6516  StringRef Mode = Tok.getString();
6517  if (Mode == "unified" || Mode == "UNIFIED")
6518    Parser.Lex();
6519  else if (Mode == "divided" || Mode == "DIVIDED")
6520    return Error(L, "'.syntax divided' arm asssembly not supported");
6521  else
6522    return Error(L, "unrecognized syntax mode in .syntax directive");
6523
6524  if (getLexer().isNot(AsmToken::EndOfStatement))
6525    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6526  Parser.Lex();
6527
6528  // TODO tell the MC streamer the mode
6529  // getParser().getStreamer().Emit???();
6530  return false;
6531}
6532
6533/// parseDirectiveCode
6534///  ::= .code 16 | 32
6535bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6536  const AsmToken &Tok = Parser.getTok();
6537  if (Tok.isNot(AsmToken::Integer))
6538    return Error(L, "unexpected token in .code directive");
6539  int64_t Val = Parser.getTok().getIntVal();
6540  if (Val == 16)
6541    Parser.Lex();
6542  else if (Val == 32)
6543    Parser.Lex();
6544  else
6545    return Error(L, "invalid operand to .code directive");
6546
6547  if (getLexer().isNot(AsmToken::EndOfStatement))
6548    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6549  Parser.Lex();
6550
6551  if (Val == 16) {
6552    if (!isThumb())
6553      SwitchMode();
6554    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6555  } else {
6556    if (isThumb())
6557      SwitchMode();
6558    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6559  }
6560
6561  return false;
6562}
6563
6564/// parseDirectiveReq
6565///  ::= name .req registername
6566bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6567  Parser.Lex(); // Eat the '.req' token.
6568  unsigned Reg;
6569  SMLoc SRegLoc, ERegLoc;
6570  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6571    Parser.EatToEndOfStatement();
6572    return Error(SRegLoc, "register name expected");
6573  }
6574
6575  // Shouldn't be anything else.
6576  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6577    Parser.EatToEndOfStatement();
6578    return Error(Parser.getTok().getLoc(),
6579                 "unexpected input in .req directive.");
6580  }
6581
6582  Parser.Lex(); // Consume the EndOfStatement
6583
6584  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6585    return Error(SRegLoc, "redefinition of '" + Name +
6586                          "' does not match original.");
6587
6588  return false;
6589}
6590
6591/// parseDirectiveUneq
6592///  ::= .unreq registername
6593bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6594  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6595    Parser.EatToEndOfStatement();
6596    return Error(L, "unexpected input in .unreq directive.");
6597  }
6598  RegisterReqs.erase(Parser.getTok().getIdentifier());
6599  Parser.Lex(); // Eat the identifier.
6600  return false;
6601}
6602
/// parseDirectiveArch
///  ::= .arch token
/// Currently unimplemented: always returns true, which presumably surfaces
/// as an unhandled-directive error to the caller — TODO implement.
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
  return true;
}
6608
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int
/// Currently unimplemented: always returns true, which presumably surfaces
/// as an unhandled-directive error to the caller — TODO implement.
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  return true;
}
6614
// Forward declaration; defined in the ARM AsmLexer library.
extern "C" void LLVMInitializeARMAsmLexer();

/// Force static initialization.
/// Registers this parser for both the ARM and Thumb targets and chains to
/// the lexer initialization so both are set up together.
extern "C" void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}
6623
6624#define GET_REGISTER_MATCHER
6625#define GET_MATCHER_IMPLEMENTATION
6626#include "ARMGenAsmMatcher.inc"
6627