ARMAsmParser.cpp revision 19055cc2712223f6834fc3cf5b547803ba83f066
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registered via the .req directive.
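  // Example (standard GNU assembler syntax):
  //   fp .req r11        @ defines the alias; added to RegisterReqs
  //   mov fp, sp         @ 'fp' resolves to r11
  //   .unreq fp          @ removes the alias again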
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // first instruction in the IT block. It's
62                              // implied in the mask, so it needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
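  // Example: the Thumb2 sequence
  //   itte eq
  //   moveq r0, #1
  //   addeq r1, r1, #1
  //   movne r0, #0
  // opens a three-instruction IT block (then, then, else): the first two
  // instructions are predicated on EQ, the last on NE. ITState captures the
  // condition and mask when the IT instruction is parsed, and
  // forwardITPosition() steps CurPosition forward as each predicated
  // instruction is matched.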
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
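  // These handle the shift operand of the PKH instructions, e.g.
  //   pkhbt r0, r1, r2, lsl #8    @ lsl amount in [0, 31]
  //   pkhtb r0, r1, r2, asr #16   @ asr amount in [1, 32]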
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an ITBlock to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_FPImmediate,
274    k_MemBarrierOpt,
275    k_Memory,
276    k_PostIndexRegister,
277    k_MSRMask,
278    k_ProcIFlags,
279    k_VectorIndex,
280    k_Register,
281    k_RegisterList,
282    k_DPRRegisterList,
283    k_SPRRegisterList,
284    k_VectorList,
285    k_VectorListAllLanes,
286    k_VectorListIndexed,
287    k_ShiftedRegister,
288    k_ShiftedImmediate,
289    k_ShifterImmediate,
290    k_RotateImmediate,
291    k_BitfieldDescriptor,
292    k_Token
293  } Kind;
294
295  SMLoc StartLoc, EndLoc;
296  SmallVector<unsigned, 8> Registers;
297
298  union {
299    struct {
300      ARMCC::CondCodes Val;
301    } CC;
302
303    struct {
304      unsigned Val;
305    } Cop;
306
307    struct {
308      unsigned Val;
309    } CoprocOption;
310
311    struct {
312      unsigned Mask:4;
313    } ITMask;
314
315    struct {
316      ARM_MB::MemBOpt Val;
317    } MBOpt;
318
319    struct {
320      ARM_PROC::IFlags Val;
321    } IFlags;
322
323    struct {
324      unsigned Val;
325    } MMask;
326
327    struct {
328      const char *Data;
329      unsigned Length;
330    } Tok;
331
332    struct {
333      unsigned RegNum;
334    } Reg;
335
336    // A vector register list is a sequential list of 1 to 4 registers.
337    struct {
338      unsigned RegNum;
339      unsigned Count;
340      unsigned LaneIndex;
341      bool isDoubleSpaced;
342    } VectorList;
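    // Typical source forms for the three vector-list kinds, e.g.:
    //   {d0, d1, d2, d3}   k_VectorList          (Count = 4)
    //   {d0[], d1[]}       k_VectorListAllLanes  (Count = 2)
    //   {d4[1]}            k_VectorListIndexed   (Count = 1, LaneIndex = 1)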
343
344    struct {
345      unsigned Val;
346    } VectorIndex;
347
348    struct {
349      const MCExpr *Val;
350    } Imm;
351
352    struct {
353      unsigned Val;       // encoded 8-bit representation
354    } FPImm;
355
356    /// Combined record for all forms of ARM address expressions.
357    struct {
358      unsigned BaseRegNum;
359      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
360      // was specified.
361      const MCConstantExpr *OffsetImm;  // Offset immediate value
362      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
363      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
364      unsigned ShiftImm;        // shift for OffsetReg.
365      unsigned Alignment;       // 0 = no alignment specified
366                                // n = alignment in bytes (2, 4, 8, 16, or 32)
367      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
368    } Memory;
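    // Examples of the address forms this record covers:
    //   [r0]               base register only
    //   [r1, #-8]          OffsetImm = -8
    //   [r2, r3, lsl #2]   OffsetRegNum = r3, ShiftType = lsl, ShiftImm = 2
    //   [r4:128]           Alignment = 16 (bytes, from the :128 specifier)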
369
370    struct {
371      unsigned RegNum;
372      bool isAdd;
373      ARM_AM::ShiftOpc ShiftTy;
374      unsigned ShiftImm;
375    } PostIdxReg;
376
377    struct {
378      bool isASR;
379      unsigned Imm;
380    } ShifterImm;
381    struct {
382      ARM_AM::ShiftOpc ShiftTy;
383      unsigned SrcReg;
384      unsigned ShiftReg;
385      unsigned ShiftImm;
386    } RegShiftedReg;
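    // e.g. the operand "r2, lsl r3" in "add r0, r1, r2, lsl r3" is recorded
    // as SrcReg = r2, ShiftTy = lsl, ShiftReg = r3.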
387    struct {
388      ARM_AM::ShiftOpc ShiftTy;
389      unsigned SrcReg;
390      unsigned ShiftImm;
391    } RegShiftedImm;
392    struct {
393      unsigned Imm;
394    } RotImm;
395    struct {
396      unsigned LSB;
397      unsigned Width;
398    } Bitfield;
399  };
400
401  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
402public:
403  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
404    Kind = o.Kind;
405    StartLoc = o.StartLoc;
406    EndLoc = o.EndLoc;
407    switch (Kind) {
408    case k_CondCode:
409      CC = o.CC;
410      break;
411    case k_ITCondMask:
412      ITMask = o.ITMask;
413      break;
414    case k_Token:
415      Tok = o.Tok;
416      break;
417    case k_CCOut:
418    case k_Register:
419      Reg = o.Reg;
420      break;
421    case k_RegisterList:
422    case k_DPRRegisterList:
423    case k_SPRRegisterList:
424      Registers = o.Registers;
425      break;
426    case k_VectorList:
427    case k_VectorListAllLanes:
428    case k_VectorListIndexed:
429      VectorList = o.VectorList;
430      break;
431    case k_CoprocNum:
432    case k_CoprocReg:
433      Cop = o.Cop;
434      break;
435    case k_CoprocOption:
436      CoprocOption = o.CoprocOption;
437      break;
438    case k_Immediate:
439      Imm = o.Imm;
440      break;
441    case k_FPImmediate:
442      FPImm = o.FPImm;
443      break;
444    case k_MemBarrierOpt:
445      MBOpt = o.MBOpt;
446      break;
447    case k_Memory:
448      Memory = o.Memory;
449      break;
450    case k_PostIndexRegister:
451      PostIdxReg = o.PostIdxReg;
452      break;
453    case k_MSRMask:
454      MMask = o.MMask;
455      break;
456    case k_ProcIFlags:
457      IFlags = o.IFlags;
458      break;
459    case k_ShifterImmediate:
460      ShifterImm = o.ShifterImm;
461      break;
462    case k_ShiftedRegister:
463      RegShiftedReg = o.RegShiftedReg;
464      break;
465    case k_ShiftedImmediate:
466      RegShiftedImm = o.RegShiftedImm;
467      break;
468    case k_RotateImmediate:
469      RotImm = o.RotImm;
470      break;
471    case k_BitfieldDescriptor:
472      Bitfield = o.Bitfield;
473      break;
474    case k_VectorIndex:
475      VectorIndex = o.VectorIndex;
476      break;
477    }
478  }
479
480  /// getStartLoc - Get the location of the first token of this operand.
481  SMLoc getStartLoc() const { return StartLoc; }
482  /// getEndLoc - Get the location of the last token of this operand.
483  SMLoc getEndLoc() const { return EndLoc; }
484
485  ARMCC::CondCodes getCondCode() const {
486    assert(Kind == k_CondCode && "Invalid access!");
487    return CC.Val;
488  }
489
490  unsigned getCoproc() const {
491    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
492    return Cop.Val;
493  }
494
495  StringRef getToken() const {
496    assert(Kind == k_Token && "Invalid access!");
497    return StringRef(Tok.Data, Tok.Length);
498  }
499
500  unsigned getReg() const {
501    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
502    return Reg.RegNum;
503  }
504
505  const SmallVectorImpl<unsigned> &getRegList() const {
506    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
507            Kind == k_SPRRegisterList) && "Invalid access!");
508    return Registers;
509  }
510
511  const MCExpr *getImm() const {
512    assert(isImm() && "Invalid access!");
513    return Imm.Val;
514  }
515
516  unsigned getFPImm() const {
517    assert(Kind == k_FPImmediate && "Invalid access!");
518    return FPImm.Val;
519  }
520
521  unsigned getVectorIndex() const {
522    assert(Kind == k_VectorIndex && "Invalid access!");
523    return VectorIndex.Val;
524  }
525
526  ARM_MB::MemBOpt getMemBarrierOpt() const {
527    assert(Kind == k_MemBarrierOpt && "Invalid access!");
528    return MBOpt.Val;
529  }
530
531  ARM_PROC::IFlags getProcIFlags() const {
532    assert(Kind == k_ProcIFlags && "Invalid access!");
533    return IFlags.Val;
534  }
535
536  unsigned getMSRMask() const {
537    assert(Kind == k_MSRMask && "Invalid access!");
538    return MMask.Val;
539  }
540
541  bool isCoprocNum() const { return Kind == k_CoprocNum; }
542  bool isCoprocReg() const { return Kind == k_CoprocReg; }
543  bool isCoprocOption() const { return Kind == k_CoprocOption; }
544  bool isCondCode() const { return Kind == k_CondCode; }
545  bool isCCOut() const { return Kind == k_CCOut; }
546  bool isITMask() const { return Kind == k_ITCondMask; }
547  bool isITCondCode() const { return Kind == k_CondCode; }
548  bool isImm() const { return Kind == k_Immediate; }
549  bool isFPImm() const { return Kind == k_FPImmediate; }
550  bool isFBits16() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 0 && Value <= 16;
556  }
557  bool isFBits32() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return Value >= 1 && Value <= 32;
563  }
564  bool isImm8s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
570  }
571  bool isImm0_1020s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
577  }
578  bool isImm0_508s4() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
584  }
585  bool isImm0_255() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 256;
591  }
592  bool isImm0_1() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 2;
598  }
599  bool isImm0_3() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 4;
605  }
606  bool isImm0_7() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 8;
612  }
613  bool isImm0_15() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 16;
619  }
620  bool isImm0_31() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 32;
626  }
627  bool isImm0_63() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value >= 0 && Value < 64;
633  }
634  bool isImm8() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 8;
640  }
641  bool isImm16() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 16;
647  }
648  bool isImm32() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value == 32;
654  }
655  bool isShrImm8() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 8;
661  }
662  bool isShrImm16() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 16;
668  }
669  bool isShrImm32() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 32;
675  }
676  bool isShrImm64() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value <= 64;
682  }
683  bool isImm1_7() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 8;
689  }
690  bool isImm1_15() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 16;
696  }
697  bool isImm1_31() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 32;
703  }
704  bool isImm1_16() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 17;
710  }
711  bool isImm1_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value > 0 && Value < 33;
717  }
718  bool isImm0_32() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 33;
724  }
725  bool isImm0_65535() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    if (!CE) return false;
729    int64_t Value = CE->getValue();
730    return Value >= 0 && Value < 65536;
731  }
732  bool isImm0_65535Expr() const {
733    if (!isImm()) return false;
734    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
735    // If it's not a constant expression, it'll generate a fixup and be
736    // handled later.
737    if (!CE) return true;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value < 65536;
740  }
741  bool isImm24bit() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value >= 0 && Value <= 0xffffff;
747  }
748  bool isImmThumbSR() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value > 0 && Value < 33;
754  }
755  bool isPKHLSLImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value >= 0 && Value < 32;
761  }
762  bool isPKHASRImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return Value > 0 && Value <= 32;
768  }
769  bool isARMSOImm() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(Value) != -1;
775  }
776  bool isARMSOImmNot() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(~Value) != -1;
782  }
783  bool isARMSOImmNeg() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getSOImmVal(-Value) != -1;
789  }
790  bool isT2SOImm() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(Value) != -1;
796  }
797  bool isT2SOImmNot() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(~Value) != -1;
803  }
804  bool isT2SOImmNeg() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return ARM_AM::getT2SOImmVal(-Value) != -1;
810  }
811  bool isSetEndImm() const {
812    if (!isImm()) return false;
813    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814    if (!CE) return false;
815    int64_t Value = CE->getValue();
816    return Value == 1 || Value == 0;
817  }
818  bool isReg() const { return Kind == k_Register; }
819  bool isRegList() const { return Kind == k_RegisterList; }
820  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
821  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
822  bool isToken() const { return Kind == k_Token; }
823  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
824  bool isMemory() const { return Kind == k_Memory; }
825  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
826  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
827  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
828  bool isRotImm() const { return Kind == k_RotateImmediate; }
829  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
830  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
831  bool isPostIdxReg() const {
832    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
833  }
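  // e.g. the trailing ", r2" in "ldr r0, [r1], r2" is a plain post-indexed
  // register, while ", r2, lsl #2" keeps its shift and therefore only
  // satisfies isPostIdxRegShifted().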
834  bool isMemNoOffset(bool alignOK = false) const {
835    if (!isMemory())
836      return false;
837    // No offset of any kind.
838    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
839     (alignOK || Memory.Alignment == 0);
840  }
841  bool isAlignedMemory() const {
842    return isMemNoOffset(true);
843  }
844  bool isAddrMode2() const {
845    if (!isMemory() || Memory.Alignment != 0) return false;
846    // Check for register offset.
847    if (Memory.OffsetRegNum) return true;
848    // Immediate offset in range [-4095, 4095].
849    if (!Memory.OffsetImm) return true;
850    int64_t Val = Memory.OffsetImm->getValue();
851    return Val > -4096 && Val < 4096;
852  }
853  bool isAM2OffsetImm() const {
854    if (!isImm()) return false;
855    // Immediate offset in range [-4095, 4095].
856    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
857    if (!CE) return false;
858    int64_t Val = CE->getValue();
859    return Val > -4096 && Val < 4096;
860  }
861  bool isAddrMode3() const {
862    // If we have an immediate that's not a constant, treat it as a label
863    // reference needing a fixup. If it is a constant, it's something else
864    // and we reject it.
865    if (isImm() && !isa<MCConstantExpr>(getImm()))
866      return true;
867    if (!isMemory() || Memory.Alignment != 0) return false;
868    // No shifts are legal for AM3.
869    if (Memory.ShiftType != ARM_AM::no_shift) return false;
870    // Check for register offset.
871    if (Memory.OffsetRegNum) return true;
872    // Immediate offset in range [-255, 255].
873    if (!Memory.OffsetImm) return true;
874    int64_t Val = Memory.OffsetImm->getValue();
875    return Val > -256 && Val < 256;
876  }
877  bool isAM3Offset() const {
878    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
879      return false;
880    if (Kind == k_PostIndexRegister)
881      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
882    // Immediate offset in range [-255, 255].
883    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
884    if (!CE) return false;
885    int64_t Val = CE->getValue();
886    // Special case, #-0 is INT32_MIN.
887    return (Val > -256 && Val < 256) || Val == INT32_MIN;
888  }
889  bool isAddrMode5() const {
890    // If we have an immediate that's not a constant, treat it as a label
891    // reference needing a fixup. If it is a constant, it's something else
892    // and we reject it.
893    if (isImm() && !isa<MCConstantExpr>(getImm()))
894      return true;
895    if (!isMemory() || Memory.Alignment != 0) return false;
896    // Check for register offset.
897    if (Memory.OffsetRegNum) return false;
898    // Immediate offset in range [-1020, 1020] and a multiple of 4.
899    if (!Memory.OffsetImm) return true;
900    int64_t Val = Memory.OffsetImm->getValue();
901    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
902      Val == INT32_MIN;
903  }
904  bool isMemTBB() const {
905    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
906        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
907      return false;
908    return true;
909  }
910  bool isMemTBH() const {
911    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
912        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
913        Memory.Alignment != 0)
914      return false;
915    return true;
916  }
917  bool isMemRegOffset() const {
918    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
919      return false;
920    return true;
921  }
922  bool isT2MemRegOffset() const {
923    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
924        Memory.Alignment != 0)
925      return false;
926    // Only lsl #{0, 1, 2, 3} allowed.
927    if (Memory.ShiftType == ARM_AM::no_shift)
928      return true;
929    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
930      return false;
931    return true;
932  }
933  bool isMemThumbRR() const {
934    // Thumb reg+reg addressing is simple. Just two registers, a base and
935    // an offset. No shifts, negations or any other complicating factors.
936    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
937        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
938      return false;
939    return isARMLowRegister(Memory.BaseRegNum) &&
940      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
941  }
942  bool isMemThumbRIs4() const {
943    if (!isMemory() || Memory.OffsetRegNum != 0 ||
944        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
945      return false;
946    // Immediate offset, multiple of 4 in range [0, 124].
947    if (!Memory.OffsetImm) return true;
948    int64_t Val = Memory.OffsetImm->getValue();
949    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
950  }
951  bool isMemThumbRIs2() const {
952    if (!isMemory() || Memory.OffsetRegNum != 0 ||
953        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
954      return false;
955    // Immediate offset, multiple of 2 in range [0, 62].
956    if (!Memory.OffsetImm) return true;
957    int64_t Val = Memory.OffsetImm->getValue();
958    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
959  }
960  bool isMemThumbRIs1() const {
961    if (!isMemory() || Memory.OffsetRegNum != 0 ||
962        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
963      return false;
964    // Immediate offset in range [0, 31].
965    if (!Memory.OffsetImm) return true;
966    int64_t Val = Memory.OffsetImm->getValue();
967    return Val >= 0 && Val <= 31;
968  }
969  bool isMemThumbSPI() const {
970    if (!isMemory() || Memory.OffsetRegNum != 0 ||
971        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
972      return false;
973    // Immediate offset, multiple of 4 in range [0, 1020].
974    if (!Memory.OffsetImm) return true;
975    int64_t Val = Memory.OffsetImm->getValue();
976    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
977  }
978  bool isMemImm8s4Offset() const {
979    // If we have an immediate that's not a constant, treat it as a label
980    // reference needing a fixup. If it is a constant, it's something else
981    // and we reject it.
982    if (isImm() && !isa<MCConstantExpr>(getImm()))
983      return true;
984    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
985      return false;
986    // Immediate offset a multiple of 4 in range [-1020, 1020].
987    if (!Memory.OffsetImm) return true;
988    int64_t Val = Memory.OffsetImm->getValue();
989    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
990  }
991  bool isMemImm0_1020s4Offset() const {
992    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
993      return false;
994    // Immediate offset a multiple of 4 in range [0, 1020].
995    if (!Memory.OffsetImm) return true;
996    int64_t Val = Memory.OffsetImm->getValue();
997    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
998  }
999  bool isMemImm8Offset() const {
1000    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1001      return false;
1002    // Immediate offset in range [-255, 255].
1003    if (!Memory.OffsetImm) return true;
1004    int64_t Val = Memory.OffsetImm->getValue();
1005    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1006  }
1007  bool isMemPosImm8Offset() const {
1008    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1009      return false;
1010    // Immediate offset in range [0, 255].
1011    if (!Memory.OffsetImm) return true;
1012    int64_t Val = Memory.OffsetImm->getValue();
1013    return Val >= 0 && Val < 256;
1014  }
1015  bool isMemNegImm8Offset() const {
1016    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1017      return false;
1018    // Immediate offset in range [-255, -1].
1019    if (!Memory.OffsetImm) return false;
1020    int64_t Val = Memory.OffsetImm->getValue();
1021    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1022  }
1023  bool isMemUImm12Offset() const {
1024    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1025      return false;
1026    // Immediate offset in range [0, 4095].
1027    if (!Memory.OffsetImm) return true;
1028    int64_t Val = Memory.OffsetImm->getValue();
1029    return (Val >= 0 && Val < 4096);
1030  }
1031  bool isMemImm12Offset() const {
1032    // If we have an immediate that's not a constant, treat it as a label
1033    // reference needing a fixup. If it is a constant, it's something else
1034    // and we reject it.
1035    if (isImm() && !isa<MCConstantExpr>(getImm()))
1036      return true;
1037
1038    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1039      return false;
1040    // Immediate offset in range [-4095, 4095].
1041    if (!Memory.OffsetImm) return true;
1042    int64_t Val = Memory.OffsetImm->getValue();
1043    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1044  }
1045  bool isPostIdxImm8() const {
1046    if (!isImm()) return false;
1047    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1048    if (!CE) return false;
1049    int64_t Val = CE->getValue();
1050    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1051  }
1052  bool isPostIdxImm8s4() const {
1053    if (!isImm()) return false;
1054    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1055    if (!CE) return false;
1056    int64_t Val = CE->getValue();
1057    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1058      (Val == INT32_MIN);
1059  }
1060
1061  bool isMSRMask() const { return Kind == k_MSRMask; }
1062  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1063
1064  // NEON operands.
1065  bool isSingleSpacedVectorList() const {
1066    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1067  }
1068  bool isDoubleSpacedVectorList() const {
1069    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1070  }
1071  bool isVecListOneD() const {
1072    if (!isSingleSpacedVectorList()) return false;
1073    return VectorList.Count == 1;
1074  }
1075
1076  bool isVecListTwoD() const {
1077    if (!isSingleSpacedVectorList()) return false;
1078    return VectorList.Count == 2;
1079  }
1080
1081  bool isVecListThreeD() const {
1082    if (!isSingleSpacedVectorList()) return false;
1083    return VectorList.Count == 3;
1084  }
1085
1086  bool isVecListFourD() const {
1087    if (!isSingleSpacedVectorList()) return false;
1088    return VectorList.Count == 4;
1089  }
1090
1091  bool isVecListTwoQ() const {
1092    if (!isDoubleSpacedVectorList()) return false;
1093    return VectorList.Count == 2;
1094  }
1095
1096  bool isSingleSpacedVectorAllLanes() const {
1097    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1098  }
1099  bool isDoubleSpacedVectorAllLanes() const {
1100    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1101  }
1102  bool isVecListOneDAllLanes() const {
1103    if (!isSingleSpacedVectorAllLanes()) return false;
1104    return VectorList.Count == 1;
1105  }
1106
1107  bool isVecListTwoDAllLanes() const {
1108    if (!isSingleSpacedVectorAllLanes()) return false;
1109    return VectorList.Count == 2;
1110  }
1111
1112  bool isVecListTwoQAllLanes() const {
1113    if (!isDoubleSpacedVectorAllLanes()) return false;
1114    return VectorList.Count == 2;
1115  }
1116
1117  bool isSingleSpacedVectorIndexed() const {
1118    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1119  }
1120  bool isDoubleSpacedVectorIndexed() const {
1121    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1122  }
1123  bool isVecListOneDByteIndexed() const {
1124    if (!isSingleSpacedVectorIndexed()) return false;
1125    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1126  }
1127
1128  bool isVecListOneDHWordIndexed() const {
1129    if (!isSingleSpacedVectorIndexed()) return false;
1130    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1131  }
1132
1133  bool isVecListOneDWordIndexed() const {
1134    if (!isSingleSpacedVectorIndexed()) return false;
1135    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1136  }
1137
1138  bool isVecListTwoDByteIndexed() const {
1139    if (!isSingleSpacedVectorIndexed()) return false;
1140    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1141  }
1142
1143  bool isVecListTwoDHWordIndexed() const {
1144    if (!isSingleSpacedVectorIndexed()) return false;
1145    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1146  }
1147
1148  bool isVecListTwoQWordIndexed() const {
1149    if (!isDoubleSpacedVectorIndexed()) return false;
1150    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1151  }
1152
1153  bool isVecListTwoQHWordIndexed() const {
1154    if (!isDoubleSpacedVectorIndexed()) return false;
1155    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1156  }
1157
1158  bool isVecListTwoDWordIndexed() const {
1159    if (!isSingleSpacedVectorIndexed()) return false;
1160    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1161  }
1162
1163  bool isVectorIndex8() const {
1164    if (Kind != k_VectorIndex) return false;
1165    return VectorIndex.Val < 8;
1166  }
1167  bool isVectorIndex16() const {
1168    if (Kind != k_VectorIndex) return false;
1169    return VectorIndex.Val < 4;
1170  }
1171  bool isVectorIndex32() const {
1172    if (Kind != k_VectorIndex) return false;
1173    return VectorIndex.Val < 2;
1174  }
1175
1176  bool isNEONi8splat() const {
1177    if (!isImm()) return false;
1178    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1179    // Must be a constant.
1180    if (!CE) return false;
1181    int64_t Value = CE->getValue();
1182    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1183    // value.
1184    return Value >= 0 && Value < 256;
1185  }
1186
1187  bool isNEONi16splat() const {
1188    if (!isImm()) return false;
1189    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1190    // Must be a constant.
1191    if (!CE) return false;
1192    int64_t Value = CE->getValue();
1193    // i16 value in the range [0,255] or [0x0100, 0xff00]
1194    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1195  }
1196
1197  bool isNEONi32splat() const {
1198    if (!isImm()) return false;
1199    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1200    // Must be a constant.
1201    if (!CE) return false;
1202    int64_t Value = CE->getValue();
1203    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1204    return (Value >= 0 && Value < 256) ||
1205      (Value >= 0x0100 && Value <= 0xff00) ||
1206      (Value >= 0x010000 && Value <= 0xff0000) ||
1207      (Value >= 0x01000000 && Value <= 0xff000000);
1208  }
1209
1210  bool isNEONi32vmov() const {
1211    if (!isImm()) return false;
1212    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1213    // Must be a constant.
1214    if (!CE) return false;
1215    int64_t Value = CE->getValue();
1216    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1217    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1218    return (Value >= 0 && Value < 256) ||
1219      (Value >= 0x0100 && Value <= 0xff00) ||
1220      (Value >= 0x010000 && Value <= 0xff0000) ||
1221      (Value >= 0x01000000 && Value <= 0xff000000) ||
1222      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1223      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1224  }
1225  bool isNEONi32vmovNeg() const {
1226    if (!isImm()) return false;
1227    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1228    // Must be a constant.
1229    if (!CE) return false;
1230    int64_t Value = ~CE->getValue();
1231    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1232    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1233    return (Value >= 0 && Value < 256) ||
1234      (Value >= 0x0100 && Value <= 0xff00) ||
1235      (Value >= 0x010000 && Value <= 0xff0000) ||
1236      (Value >= 0x01000000 && Value <= 0xff000000) ||
1237      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1238      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1239  }
1240
1241  bool isNEONi64splat() const {
1242    if (!isImm()) return false;
1243    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1244    // Must be a constant.
1245    if (!CE) return false;
1246    uint64_t Value = CE->getValue();
1247    // i64 value with each byte being either 0 or 0xff.
1248    for (unsigned i = 0; i < 8; ++i)
1249      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1250    return true;
1251  }
1252
1253  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1254    // Add as immediates when possible.  Null MCExpr = 0.
1255    if (Expr == 0)
1256      Inst.addOperand(MCOperand::CreateImm(0));
1257    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1258      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1259    else
1260      Inst.addOperand(MCOperand::CreateExpr(Expr));
1261  }
1262
1263  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1264    assert(N == 2 && "Invalid number of operands!");
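    // A predicate is two operands: the condition code and the predicate
    // register, which is CPSR for a real condition and 0 when the code is
    // AL (unconditional), e.g. "addeq" yields { ARMCC::EQ, CPSR }.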
1265    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1266    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1267    Inst.addOperand(MCOperand::CreateReg(RegNum));
1268  }
1269
1270  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1271    assert(N == 1 && "Invalid number of operands!");
1272    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1273  }
1274
1275  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1276    assert(N == 1 && "Invalid number of operands!");
1277    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1278  }
1279
1280  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1281    assert(N == 1 && "Invalid number of operands!");
1282    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1283  }
1284
1285  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1286    assert(N == 1 && "Invalid number of operands!");
1287    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1288  }
1289
1290  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1291    assert(N == 1 && "Invalid number of operands!");
1292    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1293  }
1294
1295  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1296    assert(N == 1 && "Invalid number of operands!");
1297    Inst.addOperand(MCOperand::CreateReg(getReg()));
1298  }
1299
1300  void addRegOperands(MCInst &Inst, unsigned N) const {
1301    assert(N == 1 && "Invalid number of operands!");
1302    Inst.addOperand(MCOperand::CreateReg(getReg()));
1303  }
1304
1305  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1306    assert(N == 3 && "Invalid number of operands!");
1307    assert(isRegShiftedReg() &&
1308           "addRegShiftedRegOperands() on non RegShiftedReg!");
1309    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1310    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1311    Inst.addOperand(MCOperand::CreateImm(
1312      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1313  }
1314
1315  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1316    assert(N == 2 && "Invalid number of operands!");
1317    assert(isRegShiftedImm() &&
1318           "addRegShiftedImmOperands() on non RegShiftedImm!");
1319    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1320    Inst.addOperand(MCOperand::CreateImm(
1321      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1322  }
1323
1324  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1325    assert(N == 1 && "Invalid number of operands!");
1326    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1327                                         ShifterImm.Imm));
1328  }
1329
1330  void addRegListOperands(MCInst &Inst, unsigned N) const {
1331    assert(N == 1 && "Invalid number of operands!");
1332    const SmallVectorImpl<unsigned> &RegList = getRegList();
1333    for (SmallVectorImpl<unsigned>::const_iterator
1334           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1335      Inst.addOperand(MCOperand::CreateReg(*I));
1336  }
1337
1338  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1339    addRegListOperands(Inst, N);
1340  }
1341
1342  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1343    addRegListOperands(Inst, N);
1344  }
1345
1346  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1347    assert(N == 1 && "Invalid number of operands!");
1348    // Encoded as val>>3. The printer handles display as 8, 16, 24.
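    // e.g. "sxtb r0, r1, ror #24" stores 24 in RotImm.Imm and emits 3 here.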
1349    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1350  }
1351
1352  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1353    assert(N == 1 && "Invalid number of operands!");
1354    // Munge the lsb/width into a bitfield mask.
1355    unsigned lsb = Bitfield.LSB;
1356    unsigned width = Bitfield.Width;
1357    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1358    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1359                      (32 - (lsb + width)));
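    // e.g. lsb = 8, width = 8 gives Mask = 0xffff00ff (bits [15:8] clear).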
1360    Inst.addOperand(MCOperand::CreateImm(Mask));
1361  }
1362
1363  void addImmOperands(MCInst &Inst, unsigned N) const {
1364    assert(N == 1 && "Invalid number of operands!");
1365    addExpr(Inst, getImm());
1366  }
1367
1368  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1369    assert(N == 1 && "Invalid number of operands!");
1370    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1371    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1372  }
1373
1374  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1375    assert(N == 1 && "Invalid number of operands!");
1376    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1377    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1378  }
1379
1380  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1381    assert(N == 1 && "Invalid number of operands!");
1382    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1383  }
1384
1385  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1386    assert(N == 1 && "Invalid number of operands!");
1387    // FIXME: We really want to scale the value here, but the LDRD/STRD
1388    // instructions don't encode operands that way yet.
1389    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1390    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1391  }
1392
1393  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1394    assert(N == 1 && "Invalid number of operands!");
1395    // The immediate is scaled by four in the encoding and is stored
1396    // in the MCInst as such. Lop off the low two bits here.
1397    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1398    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1399  }
1400
1401  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1402    assert(N == 1 && "Invalid number of operands!");
1403    // The immediate is scaled by four in the encoding and is stored
1404    // in the MCInst as such. Lop off the low two bits here.
1405    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1406    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1407  }
1408
1409  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1410    assert(N == 1 && "Invalid number of operands!");
1411    // The constant encodes as the immediate-1, and we store in the instruction
1412    // the bits as encoded, so subtract off one here.
1413    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1414    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1415  }
1416
1417  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1418    assert(N == 1 && "Invalid number of operands!");
1419    // The constant encodes as the immediate-1, and we store in the instruction
1420    // the bits as encoded, so subtract off one here.
1421    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1422    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1423  }
1424
1425  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1426    assert(N == 1 && "Invalid number of operands!");
1427    // The constant encodes as the immediate, except for 32, which encodes as
1428    // zero.
1429    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1430    unsigned Imm = CE->getValue();
1431    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1432  }
1433
1434  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1435    assert(N == 1 && "Invalid number of operands!");
1436    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1437    // the instruction as well.
1438    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1439    int Val = CE->getValue();
1440    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1441  }
1442
1443  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1444    assert(N == 1 && "Invalid number of operands!");
1445    // The operand is actually a t2_so_imm, but we have its bitwise
1446    // negation in the assembly source, so twiddle it here.
1447    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1448    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1449  }
1450
1451  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1452    assert(N == 1 && "Invalid number of operands!");
1453    // The operand is actually a t2_so_imm, but we have its
1454    // negation in the assembly source, so twiddle it here.
1455    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1456    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1457  }
1458
1459  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1460    assert(N == 1 && "Invalid number of operands!");
1461    // The operand is actually a so_imm, but we have its bitwise
1462    // negation in the assembly source, so twiddle it here.
1463    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1464    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1465  }
1466
1467  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1468    assert(N == 1 && "Invalid number of operands!");
1469    // The operand is actually a so_imm, but we have its
1470    // negation in the assembly source, so twiddle it here.
1471    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1472    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1473  }
1474
1475  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1476    assert(N == 1 && "Invalid number of operands!");
1477    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1478  }
1479
1480  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1481    assert(N == 1 && "Invalid number of operands!");
1482    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1483  }
1484
1485  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1486    assert(N == 2 && "Invalid number of operands!");
1487    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1488    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1489  }
1490
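  // Addrmode 2: base register, optional offset register, and a packed
  // immediate built with ARM_AM::getAM2Opc() carrying the add/sub flag, shift
  // amount and shift type. For example, an immediate offset of -8 is emitted
  // as offset register 0 plus getAM2Opc(sub, 8, no_shift); INT32_MIN is the
  // sentinel used for "#-0".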
1491  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1492    assert(N == 3 && "Invalid number of operands!");
1493    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1494    if (!Memory.OffsetRegNum) {
1495      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1496      // Special case for #-0
1497      if (Val == INT32_MIN) Val = 0;
1498      if (Val < 0) Val = -Val;
1499      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1500    } else {
1501      // For register offset, we encode the shift type and negation flag
1502      // here.
1503      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1504                              Memory.ShiftImm, Memory.ShiftType);
1505    }
1506    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1507    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1508    Inst.addOperand(MCOperand::CreateImm(Val));
1509  }
1510
1511  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1512    assert(N == 2 && "Invalid number of operands!");
1513    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1514    assert(CE && "non-constant AM2OffsetImm operand!");
1515    int32_t Val = CE->getValue();
1516    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1517    // Special case for #-0
1518    if (Val == INT32_MIN) Val = 0;
1519    if (Val < 0) Val = -Val;
1520    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1521    Inst.addOperand(MCOperand::CreateReg(0));
1522    Inst.addOperand(MCOperand::CreateImm(Val));
1523  }
1524
1525  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1526    assert(N == 3 && "Invalid number of operands!");
1527    // If we have an immediate that's not a constant, treat it as a label
1528    // reference needing a fixup. If it is a constant, it's something else
1529    // and we reject it.
1530    if (isImm()) {
1531      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1532      Inst.addOperand(MCOperand::CreateReg(0));
1533      Inst.addOperand(MCOperand::CreateImm(0));
1534      return;
1535    }
1536
1537    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1538    if (!Memory.OffsetRegNum) {
1539      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1540      // Special case for #-0
1541      if (Val == INT32_MIN) Val = 0;
1542      if (Val < 0) Val = -Val;
1543      Val = ARM_AM::getAM3Opc(AddSub, Val);
1544    } else {
1545      // For register offset, we encode the shift type and negation flag
1546      // here.
1547      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1548    }
1549    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1550    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1551    Inst.addOperand(MCOperand::CreateImm(Val));
1552  }
1553
1554  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1555    assert(N == 2 && "Invalid number of operands!");
1556    if (Kind == k_PostIndexRegister) {
1557      int32_t Val =
1558        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1559      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1560      Inst.addOperand(MCOperand::CreateImm(Val));
1561      return;
1562    }
1563
1564    // Constant offset.
1565    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1566    int32_t Val = CE->getValue();
1567    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1568    // Special case for #-0
1569    if (Val == INT32_MIN) Val = 0;
1570    if (Val < 0) Val = -Val;
1571    Val = ARM_AM::getAM3Opc(AddSub, Val);
1572    Inst.addOperand(MCOperand::CreateReg(0));
1573    Inst.addOperand(MCOperand::CreateImm(Val));
1574  }
1575
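  // Addrmode 5 (VFP load/store): the offset is in words, so the byte offset
  // from the source is divided by 4 before being packed with the add/sub
  // flag. E.g. a byte offset of -16 becomes getAM5Opc(sub, 4).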
1576  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1577    assert(N == 2 && "Invalid number of operands!");
1578    // If we have an immediate that's not a constant, treat it as a label
1579    // reference needing a fixup. If it is a constant, it's something else
1580    // and we reject it.
1581    if (isImm()) {
1582      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1583      Inst.addOperand(MCOperand::CreateImm(0));
1584      return;
1585    }
1586
1587    // The lower two bits are always zero and as such are not encoded.
1588    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1589    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1590    // Special case for #-0
1591    if (Val == INT32_MIN) Val = 0;
1592    if (Val < 0) Val = -Val;
1593    Val = ARM_AM::getAM5Opc(AddSub, Val);
1594    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1595    Inst.addOperand(MCOperand::CreateImm(Val));
1596  }
1597
1598  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1599    assert(N == 2 && "Invalid number of operands!");
1600    // If we have an immediate that's not a constant, treat it as a label
1601    // reference needing a fixup. If it is a constant, it's something else
1602    // and we reject it.
1603    if (isImm()) {
1604      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1605      Inst.addOperand(MCOperand::CreateImm(0));
1606      return;
1607    }
1608
1609    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1610    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1611    Inst.addOperand(MCOperand::CreateImm(Val));
1612  }
1613
1614  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1615    assert(N == 2 && "Invalid number of operands!");
1616    // The lower two bits are always zero and as such are not encoded.
1617    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1618    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1619    Inst.addOperand(MCOperand::CreateImm(Val));
1620  }
1621
1622  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1623    assert(N == 2 && "Invalid number of operands!");
1624    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1625    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1626    Inst.addOperand(MCOperand::CreateImm(Val));
1627  }
1628
1629  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1630    addMemImm8OffsetOperands(Inst, N);
1631  }
1632
1633  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1634    addMemImm8OffsetOperands(Inst, N);
1635  }
1636
1637  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1638    assert(N == 2 && "Invalid number of operands!");
1639    // If this is an immediate, it's a label reference.
1640    if (isImm()) {
1641      addExpr(Inst, getImm());
1642      Inst.addOperand(MCOperand::CreateImm(0));
1643      return;
1644    }
1645
1646    // Otherwise, it's a normal memory reg+offset.
1647    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1648    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1649    Inst.addOperand(MCOperand::CreateImm(Val));
1650  }
1651
1652  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1653    assert(N == 2 && "Invalid number of operands!");
1654    // If this is an immediate, it's a label reference.
1655    if (isImm()) {
1656      addExpr(Inst, getImm());
1657      Inst.addOperand(MCOperand::CreateImm(0));
1658      return;
1659    }
1660
1661    // Otherwise, it's a normal memory reg+offset.
1662    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1663    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1664    Inst.addOperand(MCOperand::CreateImm(Val));
1665  }
1666
1667  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1668    assert(N == 2 && "Invalid number of operands!");
1669    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1670    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1671  }
1672
1673  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1674    assert(N == 2 && "Invalid number of operands!");
1675    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1676    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1677  }
1678
1679  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1680    assert(N == 3 && "Invalid number of operands!");
1681    unsigned Val =
1682      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1683                        Memory.ShiftImm, Memory.ShiftType);
1684    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1685    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1686    Inst.addOperand(MCOperand::CreateImm(Val));
1687  }
1688
1689  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1690    assert(N == 3 && "Invalid number of operands!");
1691    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1692    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1693    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1694  }
1695
1696  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1697    assert(N == 2 && "Invalid number of operands!");
1698    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1699    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1700  }
1701
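  // The Thumb reg+imm forms below scale the source byte offset by the access
  // size: /4 for word, /2 for halfword, and unscaled for byte. For example,
  // an offset of 28 is stored as 7 by addMemThumbRIs4Operands().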
1702  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1703    assert(N == 2 && "Invalid number of operands!");
1704    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1705    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1706    Inst.addOperand(MCOperand::CreateImm(Val));
1707  }
1708
1709  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1710    assert(N == 2 && "Invalid number of operands!");
1711    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1712    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1713    Inst.addOperand(MCOperand::CreateImm(Val));
1714  }
1715
1716  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1717    assert(N == 2 && "Invalid number of operands!");
1718    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1719    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1720    Inst.addOperand(MCOperand::CreateImm(Val));
1721  }
1722
1723  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1724    assert(N == 2 && "Invalid number of operands!");
1725    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1726    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1727    Inst.addOperand(MCOperand::CreateImm(Val));
1728  }
1729
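  // Post-indexed immediates pack the magnitude in the low bits and the
  // add/subtract flag in bit 8. E.g. a value of 4 becomes 0x104 and -4
  // becomes 0x004; "#-0" (represented as INT32_MIN, per the convention used
  // elsewhere in this file) encodes as 0 with the add bit clear. The s4
  // variant additionally divides the magnitude by four.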
1730  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1731    assert(N == 1 && "Invalid number of operands!");
1732    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1733    assert(CE && "non-constant post-idx-imm8 operand!");
1734    int Imm = CE->getValue();
1735    bool isAdd = Imm >= 0;
1736    if (Imm == INT32_MIN) Imm = 0;
1737    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1738    Inst.addOperand(MCOperand::CreateImm(Imm));
1739  }
1740
1741  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1742    assert(N == 1 && "Invalid number of operands!");
1743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1744    assert(CE && "non-constant post-idx-imm8s4 operand!");
1745    int Imm = CE->getValue();
1746    bool isAdd = Imm >= 0;
1747    if (Imm == INT32_MIN) Imm = 0;
1748    // Immediate is scaled by 4.
1749    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1750    Inst.addOperand(MCOperand::CreateImm(Imm));
1751  }
1752
1753  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1754    assert(N == 2 && "Invalid number of operands!");
1755    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1756    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1757  }
1758
1759  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1760    assert(N == 2 && "Invalid number of operands!");
1761    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1762    // The sign, shift type, and shift amount are encoded in a single operand
1763    // using the AM2 encoding helpers.
1764    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1765    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1766                                     PostIdxReg.ShiftTy);
1767    Inst.addOperand(MCOperand::CreateImm(Imm));
1768  }
1769
1770  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1771    assert(N == 1 && "Invalid number of operands!");
1772    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1773  }
1774
1775  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1776    assert(N == 1 && "Invalid number of operands!");
1777    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1778  }
1779
1780  void addVecListOperands(MCInst &Inst, unsigned N) const {
1781    assert(N == 1 && "Invalid number of operands!");
1782    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1783  }
1784
1785  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1786    assert(N == 2 && "Invalid number of operands!");
1787    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1788    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1789  }
1790
1791  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1792    assert(N == 1 && "Invalid number of operands!");
1793    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1794  }
1795
1796  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1797    assert(N == 1 && "Invalid number of operands!");
1798    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1799  }
1800
1801  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1802    assert(N == 1 && "Invalid number of operands!");
1803    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1804  }
1805
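  // For the NEON modified-immediate forms below, the value stored in the
  // MCInst carries both the constant bits and a tag in the upper bits that
  // selects how the constant is replicated (roughly the cmode/op fields).
  // E.g. an i8 splat of 0x42 is stored as 0x42 | 0xe00. This is a sketch of
  // the intent; the exact bit assignments follow what the encoders expect.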
1806  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1807    assert(N == 1 && "Invalid number of operands!");
1808    // The immediate encodes the type of constant as well as the value.
1809    // Mask in that this is an i8 splat.
1810    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1811    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1812  }
1813
1814  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1815    assert(N == 1 && "Invalid number of operands!");
1816    // The immediate encodes the type of constant as well as the value.
1817    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1818    unsigned Value = CE->getValue();
1819    if (Value >= 256)
1820      Value = (Value >> 8) | 0xa00;
1821    else
1822      Value |= 0x800;
1823    Inst.addOperand(MCOperand::CreateImm(Value));
1824  }
1825
1826  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1827    assert(N == 1 && "Invalid number of operands!");
1828    // The immediate encodes the type of constant as well as the value.
1829    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1830    unsigned Value = CE->getValue();
1831    if (Value >= 256 && Value <= 0xff00)
1832      Value = (Value >> 8) | 0x200;
1833    else if (Value > 0xffff && Value <= 0xff0000)
1834      Value = (Value >> 16) | 0x400;
1835    else if (Value > 0xffffff)
1836      Value = (Value >> 24) | 0x600;
1837    Inst.addOperand(MCOperand::CreateImm(Value));
1838  }
1839
1840  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1841    assert(N == 1 && "Invalid number of operands!");
1842    // The immediate encodes the type of constant as well as the value.
1843    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1844    unsigned Value = CE->getValue();
1845    if (Value >= 256 && Value <= 0xffff)
1846      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1847    else if (Value > 0xffff && Value <= 0xffffff)
1848      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1849    else if (Value > 0xffffff)
1850      Value = (Value >> 24) | 0x600;
1851    Inst.addOperand(MCOperand::CreateImm(Value));
1852  }
1853
1854  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1855    assert(N == 1 && "Invalid number of operands!");
1856    // The immediate encodes the type of constant as well as the value.
1857    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1858    unsigned Value = ~CE->getValue();
1859    if (Value >= 256 && Value <= 0xffff)
1860      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1861    else if (Value > 0xffff && Value <= 0xffffff)
1862      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1863    else if (Value > 0xffffff)
1864      Value = (Value >> 24) | 0x600;
1865    Inst.addOperand(MCOperand::CreateImm(Value));
1866  }
1867
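  // i64 splats are limited to values where every byte is 0x00 or 0xff (the
  // operand predicate is assumed to have checked this); the loop below
  // collapses each byte to a single bit. E.g. 0x00ff00ff00ff00ff becomes
  // 0x55 | 0x1e00.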
1868  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1869    assert(N == 1 && "Invalid number of operands!");
1870    // The immediate encodes the type of constant as well as the value.
1871    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1872    uint64_t Value = CE->getValue();
1873    unsigned Imm = 0;
1874    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1875      Imm |= (Value & 1) << i;
1876    }
1877    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1878  }
1879
1880  virtual void print(raw_ostream &OS) const;
1881
1882  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1883    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1884    Op->ITMask.Mask = Mask;
1885    Op->StartLoc = S;
1886    Op->EndLoc = S;
1887    return Op;
1888  }
1889
1890  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1891    ARMOperand *Op = new ARMOperand(k_CondCode);
1892    Op->CC.Val = CC;
1893    Op->StartLoc = S;
1894    Op->EndLoc = S;
1895    return Op;
1896  }
1897
1898  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1899    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1900    Op->Cop.Val = CopVal;
1901    Op->StartLoc = S;
1902    Op->EndLoc = S;
1903    return Op;
1904  }
1905
1906  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1907    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1908    Op->Cop.Val = CopVal;
1909    Op->StartLoc = S;
1910    Op->EndLoc = S;
1911    return Op;
1912  }
1913
1914  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1915    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1916    Op->Cop.Val = Val;
1917    Op->StartLoc = S;
1918    Op->EndLoc = E;
1919    return Op;
1920  }
1921
1922  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1923    ARMOperand *Op = new ARMOperand(k_CCOut);
1924    Op->Reg.RegNum = RegNum;
1925    Op->StartLoc = S;
1926    Op->EndLoc = S;
1927    return Op;
1928  }
1929
1930  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1931    ARMOperand *Op = new ARMOperand(k_Token);
1932    Op->Tok.Data = Str.data();
1933    Op->Tok.Length = Str.size();
1934    Op->StartLoc = S;
1935    Op->EndLoc = S;
1936    return Op;
1937  }
1938
1939  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1940    ARMOperand *Op = new ARMOperand(k_Register);
1941    Op->Reg.RegNum = RegNum;
1942    Op->StartLoc = S;
1943    Op->EndLoc = E;
1944    return Op;
1945  }
1946
1947  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1948                                           unsigned SrcReg,
1949                                           unsigned ShiftReg,
1950                                           unsigned ShiftImm,
1951                                           SMLoc S, SMLoc E) {
1952    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1953    Op->RegShiftedReg.ShiftTy = ShTy;
1954    Op->RegShiftedReg.SrcReg = SrcReg;
1955    Op->RegShiftedReg.ShiftReg = ShiftReg;
1956    Op->RegShiftedReg.ShiftImm = ShiftImm;
1957    Op->StartLoc = S;
1958    Op->EndLoc = E;
1959    return Op;
1960  }
1961
1962  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1963                                            unsigned SrcReg,
1964                                            unsigned ShiftImm,
1965                                            SMLoc S, SMLoc E) {
1966    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1967    Op->RegShiftedImm.ShiftTy = ShTy;
1968    Op->RegShiftedImm.SrcReg = SrcReg;
1969    Op->RegShiftedImm.ShiftImm = ShiftImm;
1970    Op->StartLoc = S;
1971    Op->EndLoc = E;
1972    return Op;
1973  }
1974
1975  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1976                                   SMLoc S, SMLoc E) {
1977    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1978    Op->ShifterImm.isASR = isASR;
1979    Op->ShifterImm.Imm = Imm;
1980    Op->StartLoc = S;
1981    Op->EndLoc = E;
1982    return Op;
1983  }
1984
1985  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1986    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1987    Op->RotImm.Imm = Imm;
1988    Op->StartLoc = S;
1989    Op->EndLoc = E;
1990    return Op;
1991  }
1992
1993  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1994                                    SMLoc S, SMLoc E) {
1995    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1996    Op->Bitfield.LSB = LSB;
1997    Op->Bitfield.Width = Width;
1998    Op->StartLoc = S;
1999    Op->EndLoc = E;
2000    return Op;
2001  }
2002
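  // Note: the register-list kind (GPR vs. DPR vs. SPR) is inferred from the
  // first register, and the entries are stored sorted by register enum value.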
2003  static ARMOperand *
2004  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2005                SMLoc StartLoc, SMLoc EndLoc) {
2006    KindTy Kind = k_RegisterList;
2007
2008    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2009      Kind = k_DPRRegisterList;
2010    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2011             contains(Regs.front().first))
2012      Kind = k_SPRRegisterList;
2013
2014    ARMOperand *Op = new ARMOperand(Kind);
2015    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2016           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2017      Op->Registers.push_back(I->first);
2018    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2019    Op->StartLoc = StartLoc;
2020    Op->EndLoc = EndLoc;
2021    return Op;
2022  }
2023
2024  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2025                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2026    ARMOperand *Op = new ARMOperand(k_VectorList);
2027    Op->VectorList.RegNum = RegNum;
2028    Op->VectorList.Count = Count;
2029    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2030    Op->StartLoc = S;
2031    Op->EndLoc = E;
2032    return Op;
2033  }
2034
2035  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2036                                              bool isDoubleSpaced,
2037                                              SMLoc S, SMLoc E) {
2038    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2039    Op->VectorList.RegNum = RegNum;
2040    Op->VectorList.Count = Count;
2041    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2042    Op->StartLoc = S;
2043    Op->EndLoc = E;
2044    return Op;
2045  }
2046
2047  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2048                                             unsigned Index,
2049                                             bool isDoubleSpaced,
2050                                             SMLoc S, SMLoc E) {
2051    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2052    Op->VectorList.RegNum = RegNum;
2053    Op->VectorList.Count = Count;
2054    Op->VectorList.LaneIndex = Index;
2055    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2056    Op->StartLoc = S;
2057    Op->EndLoc = E;
2058    return Op;
2059  }
2060
2061  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2062                                       MCContext &Ctx) {
2063    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2064    Op->VectorIndex.Val = Idx;
2065    Op->StartLoc = S;
2066    Op->EndLoc = E;
2067    return Op;
2068  }
2069
2070  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2071    ARMOperand *Op = new ARMOperand(k_Immediate);
2072    Op->Imm.Val = Val;
2073    Op->StartLoc = S;
2074    Op->EndLoc = E;
2075    return Op;
2076  }
2077
2078  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2079    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2080    Op->FPImm.Val = Val;
2081    Op->StartLoc = S;
2082    Op->EndLoc = S;
2083    return Op;
2084  }
2085
2086  static ARMOperand *CreateMem(unsigned BaseRegNum,
2087                               const MCConstantExpr *OffsetImm,
2088                               unsigned OffsetRegNum,
2089                               ARM_AM::ShiftOpc ShiftType,
2090                               unsigned ShiftImm,
2091                               unsigned Alignment,
2092                               bool isNegative,
2093                               SMLoc S, SMLoc E) {
2094    ARMOperand *Op = new ARMOperand(k_Memory);
2095    Op->Memory.BaseRegNum = BaseRegNum;
2096    Op->Memory.OffsetImm = OffsetImm;
2097    Op->Memory.OffsetRegNum = OffsetRegNum;
2098    Op->Memory.ShiftType = ShiftType;
2099    Op->Memory.ShiftImm = ShiftImm;
2100    Op->Memory.Alignment = Alignment;
2101    Op->Memory.isNegative = isNegative;
2102    Op->StartLoc = S;
2103    Op->EndLoc = E;
2104    return Op;
2105  }
2106
2107  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2108                                      ARM_AM::ShiftOpc ShiftTy,
2109                                      unsigned ShiftImm,
2110                                      SMLoc S, SMLoc E) {
2111    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2112    Op->PostIdxReg.RegNum = RegNum;
2113    Op->PostIdxReg.isAdd = isAdd;
2114    Op->PostIdxReg.ShiftTy = ShiftTy;
2115    Op->PostIdxReg.ShiftImm = ShiftImm;
2116    Op->StartLoc = S;
2117    Op->EndLoc = E;
2118    return Op;
2119  }
2120
2121  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2122    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2123    Op->MBOpt.Val = Opt;
2124    Op->StartLoc = S;
2125    Op->EndLoc = S;
2126    return Op;
2127  }
2128
2129  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2130    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2131    Op->IFlags.Val = IFlags;
2132    Op->StartLoc = S;
2133    Op->EndLoc = S;
2134    return Op;
2135  }
2136
2137  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2138    ARMOperand *Op = new ARMOperand(k_MSRMask);
2139    Op->MMask.Val = MMask;
2140    Op->StartLoc = S;
2141    Op->EndLoc = S;
2142    return Op;
2143  }
2144};
2145
2146} // end anonymous namespace.
2147
2148void ARMOperand::print(raw_ostream &OS) const {
2149  switch (Kind) {
2150  case k_FPImmediate:
2151    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2152       << ") >";
2153    break;
2154  case k_CondCode:
2155    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2156    break;
2157  case k_CCOut:
2158    OS << "<ccout " << getReg() << ">";
2159    break;
2160  case k_ITCondMask: {
2161    static const char *MaskStr[] = {
2162      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2163      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2164    };
2165    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2166    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2167    break;
2168  }
2169  case k_CoprocNum:
2170    OS << "<coprocessor number: " << getCoproc() << ">";
2171    break;
2172  case k_CoprocReg:
2173    OS << "<coprocessor register: " << getCoproc() << ">";
2174    break;
2175  case k_CoprocOption:
2176    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2177    break;
2178  case k_MSRMask:
2179    OS << "<mask: " << getMSRMask() << ">";
2180    break;
2181  case k_Immediate:
2182    getImm()->print(OS);
2183    break;
2184  case k_MemBarrierOpt:
2185    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2186    break;
2187  case k_Memory:
2188    OS << "<memory "
2189       << "base:" << Memory.BaseRegNum;
2190    OS << ">";
2191    break;
2192  case k_PostIndexRegister:
2193    OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2194       << PostIdxReg.RegNum;
2195    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2196      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2197         << PostIdxReg.ShiftImm;
2198    OS << ">";
2199    break;
2200  case k_ProcIFlags: {
2201    OS << "<ARM_PROC::";
2202    unsigned IFlags = getProcIFlags();
2203    for (int i=2; i >= 0; --i)
2204      if (IFlags & (1 << i))
2205        OS << ARM_PROC::IFlagsToString(1 << i);
2206    OS << ">";
2207    break;
2208  }
2209  case k_Register:
2210    OS << "<register " << getReg() << ">";
2211    break;
2212  case k_ShifterImmediate:
2213    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2214       << " #" << ShifterImm.Imm << ">";
2215    break;
2216  case k_ShiftedRegister:
2217    OS << "<so_reg_reg "
2218       << RegShiftedReg.SrcReg << " "
2219       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2220       << " " << RegShiftedReg.ShiftReg << ">";
2221    break;
2222  case k_ShiftedImmediate:
2223    OS << "<so_reg_imm "
2224       << RegShiftedImm.SrcReg << " "
2225       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2226       << " #" << RegShiftedImm.ShiftImm << ">";
2227    break;
2228  case k_RotateImmediate:
2229    OS << "<ror #" << (RotImm.Imm * 8) << ">";
2230    break;
2231  case k_BitfieldDescriptor:
2232    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2233       << ", width: " << Bitfield.Width << ">";
2234    break;
2235  case k_RegisterList:
2236  case k_DPRRegisterList:
2237  case k_SPRRegisterList: {
2238    OS << "<register_list ";
2239
2240    const SmallVectorImpl<unsigned> &RegList = getRegList();
2241    for (SmallVectorImpl<unsigned>::const_iterator
2242           I = RegList.begin(), E = RegList.end(); I != E; ) {
2243      OS << *I;
2244      if (++I < E) OS << ", ";
2245    }
2246
2247    OS << ">";
2248    break;
2249  }
2250  case k_VectorList:
2251    OS << "<vector_list " << VectorList.Count << " * "
2252       << VectorList.RegNum << ">";
2253    break;
2254  case k_VectorListAllLanes:
2255    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2256       << VectorList.RegNum << ">";
2257    break;
2258  case k_VectorListIndexed:
2259    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2260       << VectorList.Count << " * " << VectorList.RegNum << ">";
2261    break;
2262  case k_Token:
2263    OS << "'" << getToken() << "'";
2264    break;
2265  case k_VectorIndex:
2266    OS << "<vectorindex " << getVectorIndex() << ">";
2267    break;
2268  }
2269}
2270
2271/// @name Auto-generated Match Functions
2272/// {
2273
2274static unsigned MatchRegisterName(StringRef Name);
2275
2276/// }
2277
2278bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2279                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2280  StartLoc = Parser.getTok().getLoc();
2281  RegNo = tryParseRegister();
2282  EndLoc = Parser.getTok().getLoc();
2283
2284  return (RegNo == (unsigned)-1);
2285}
2286
2287/// Try to parse a register name.  The token must be an Identifier when called,
2288/// and if it is a register name the token is eaten and the register number is
2289/// returned.  Otherwise return -1.
2290///
2291int ARMAsmParser::tryParseRegister() {
2292  const AsmToken &Tok = Parser.getTok();
2293  if (Tok.isNot(AsmToken::Identifier)) return -1;
2294
2295  std::string lowerCase = Tok.getString().lower();
2296  unsigned RegNum = MatchRegisterName(lowerCase);
2297  if (!RegNum) {
2298    RegNum = StringSwitch<unsigned>(lowerCase)
2299      .Case("r13", ARM::SP)
2300      .Case("r14", ARM::LR)
2301      .Case("r15", ARM::PC)
2302      .Case("ip", ARM::R12)
2303      // Additional register name aliases for 'gas' compatibility.
2304      .Case("a1", ARM::R0)
2305      .Case("a2", ARM::R1)
2306      .Case("a3", ARM::R2)
2307      .Case("a4", ARM::R3)
2308      .Case("v1", ARM::R4)
2309      .Case("v2", ARM::R5)
2310      .Case("v3", ARM::R6)
2311      .Case("v4", ARM::R7)
2312      .Case("v5", ARM::R8)
2313      .Case("v6", ARM::R9)
2314      .Case("v7", ARM::R10)
2315      .Case("v8", ARM::R11)
2316      .Case("sb", ARM::R9)
2317      .Case("sl", ARM::R10)
2318      .Case("fp", ARM::R11)
2319      .Default(0);
2320  }
2321  if (!RegNum) {
2322    // Check for aliases registered via .req. Canonicalize to lower case.
2323    // That's more consistent since register names are case insensitive, and
2324    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2325    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2326    // If no match, return failure.
2327    if (Entry == RegisterReqs.end())
2328      return -1;
2329    Parser.Lex(); // Eat identifier token.
2330    return Entry->getValue();
2331  }
2332
2333  Parser.Lex(); // Eat identifier token.
2334
2335  return RegNum;
2336}
2337
2338// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2339// If a recoverable error occurs, return 1. If an irrecoverable error
2340// occurs, return -1. An irrecoverable error is one where tokens have been
2341// consumed in the process of trying to parse the shifter (i.e., when it is
2342// indeed a shifter operand, but malformed).
2343int ARMAsmParser::tryParseShiftRegister(
2344                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2345  SMLoc S = Parser.getTok().getLoc();
2346  const AsmToken &Tok = Parser.getTok();
2347  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2348
2349  std::string lowerCase = Tok.getString().lower();
2350  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2351      .Case("asl", ARM_AM::lsl)
2352      .Case("lsl", ARM_AM::lsl)
2353      .Case("lsr", ARM_AM::lsr)
2354      .Case("asr", ARM_AM::asr)
2355      .Case("ror", ARM_AM::ror)
2356      .Case("rrx", ARM_AM::rrx)
2357      .Default(ARM_AM::no_shift);
2358
2359  if (ShiftTy == ARM_AM::no_shift)
2360    return 1;
2361
2362  Parser.Lex(); // Eat the operator.
2363
2364  // The source register for the shift has already been added to the
2365  // operand list, so we need to pop it off and combine it into the shifted
2366  // register operand instead.
2367  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2368  if (!PrevOp->isReg())
2369    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2370  int SrcReg = PrevOp->getReg();
2371  int64_t Imm = 0;
2372  int ShiftReg = 0;
2373  if (ShiftTy == ARM_AM::rrx) {
2374    // RRX doesn't have an explicit shift amount. The encoder expects
2375    // the shift register to be the same as the source register. Seems odd,
2376    // but OK.
2377    ShiftReg = SrcReg;
2378  } else {
2379    // Figure out if this is shifted by a constant or a register (for non-RRX).
2380    if (Parser.getTok().is(AsmToken::Hash) ||
2381        Parser.getTok().is(AsmToken::Dollar)) {
2382      Parser.Lex(); // Eat hash.
2383      SMLoc ImmLoc = Parser.getTok().getLoc();
2384      const MCExpr *ShiftExpr = 0;
2385      if (getParser().ParseExpression(ShiftExpr)) {
2386        Error(ImmLoc, "invalid immediate shift value");
2387        return -1;
2388      }
2389      // The expression must be evaluatable as an immediate.
2390      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2391      if (!CE) {
2392        Error(ImmLoc, "invalid immediate shift value");
2393        return -1;
2394      }
2395      // Range check the immediate.
2396      // lsl, ror: 0 <= imm <= 31
2397      // lsr, asr: 0 <= imm <= 32
2398      Imm = CE->getValue();
2399      if (Imm < 0 ||
2400          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2401          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2402        Error(ImmLoc, "immediate shift value out of range");
2403        return -1;
2404      }
2405      // shift by zero is a nop. Always send it through as lsl.
2406      // ('as' compatibility)
2407      if (Imm == 0)
2408        ShiftTy = ARM_AM::lsl;
2409    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2410      ShiftReg = tryParseRegister();
2411      SMLoc L = Parser.getTok().getLoc();
2412      if (ShiftReg == -1) {
2413        Error(L, "expected immediate or register in shift operand");
2414        return -1;
2415      }
2416    } else {
2417      Error(Parser.getTok().getLoc(),
2418            "expected immediate or register in shift operand");
2419      return -1;
2420    }
2421  }
2422
2423  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2424    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2425                                                         ShiftReg, Imm,
2426                                               S, Parser.getTok().getLoc()));
2427  else
2428    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2429                                               S, Parser.getTok().getLoc()));
2430
2431  return 0;
2432}
2433
2434
2435/// Try to parse a register name.  The token must be an Identifier when called.
2436/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2437/// if there is a "writeback". Returns 'true' if it's not a register.
2438///
2439/// TODO: this is likely to change to allow different register types and/or to
2440/// parse for a specific register type.
2441bool ARMAsmParser::
2442tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2443  SMLoc S = Parser.getTok().getLoc();
2444  int RegNo = tryParseRegister();
2445  if (RegNo == -1)
2446    return true;
2447
2448  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2449
2450  const AsmToken &ExclaimTok = Parser.getTok();
2451  if (ExclaimTok.is(AsmToken::Exclaim)) {
2452    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2453                                               ExclaimTok.getLoc()));
2454    Parser.Lex(); // Eat exclaim token
2455    return false;
2456  }
2457
2458  // Also check for an index operand. This is only legal for vector registers,
2459  // but that'll get caught OK in operand matching, so we don't need to
2460  // explicitly filter everything else out here.
2461  if (Parser.getTok().is(AsmToken::LBrac)) {
2462    SMLoc SIdx = Parser.getTok().getLoc();
2463    Parser.Lex(); // Eat left bracket token.
2464
2465    const MCExpr *ImmVal;
2466    if (getParser().ParseExpression(ImmVal))
2467      return true;
2468    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2469    if (!MCE) {
2470      TokError("immediate value expected for vector index");
2471      return true;
2472    }
2473
2474    SMLoc E = Parser.getTok().getLoc();
2475    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2476      Error(E, "']' expected");
2477      return true;
2478    }
2479
2480    Parser.Lex(); // Eat right bracket token.
2481
2482    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2483                                                     SIdx, E,
2484                                                     getContext()));
2485  }
2486
2487  return false;
2488}
2489
2490/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2491/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2492/// "c5", ...
2493static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2494  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2495  // but efficient.
2496  switch (Name.size()) {
2497  default: break;
2498  case 2:
2499    if (Name[0] != CoprocOp)
2500      return -1;
2501    switch (Name[1]) {
2502    default:  return -1;
2503    case '0': return 0;
2504    case '1': return 1;
2505    case '2': return 2;
2506    case '3': return 3;
2507    case '4': return 4;
2508    case '5': return 5;
2509    case '6': return 6;
2510    case '7': return 7;
2511    case '8': return 8;
2512    case '9': return 9;
2513    }
2514    break;
2515  case 3:
2516    if (Name[0] != CoprocOp || Name[1] != '1')
2517      return -1;
2518    switch (Name[2]) {
2519    default:  return -1;
2520    case '0': return 10;
2521    case '1': return 11;
2522    case '2': return 12;
2523    case '3': return 13;
2524    case '4': return 14;
2525    case '5': return 15;
2526    }
2527    break;
2528  }
2529
2530  return -1;
2531}
2532
2533/// parseITCondCode - Try to parse a condition code for an IT instruction.
2534ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2535parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2536  SMLoc S = Parser.getTok().getLoc();
2537  const AsmToken &Tok = Parser.getTok();
2538  if (!Tok.is(AsmToken::Identifier))
2539    return MatchOperand_NoMatch;
2540  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2541    .Case("eq", ARMCC::EQ)
2542    .Case("ne", ARMCC::NE)
2543    .Case("hs", ARMCC::HS)
2544    .Case("cs", ARMCC::HS)
2545    .Case("lo", ARMCC::LO)
2546    .Case("cc", ARMCC::LO)
2547    .Case("mi", ARMCC::MI)
2548    .Case("pl", ARMCC::PL)
2549    .Case("vs", ARMCC::VS)
2550    .Case("vc", ARMCC::VC)
2551    .Case("hi", ARMCC::HI)
2552    .Case("ls", ARMCC::LS)
2553    .Case("ge", ARMCC::GE)
2554    .Case("lt", ARMCC::LT)
2555    .Case("gt", ARMCC::GT)
2556    .Case("le", ARMCC::LE)
2557    .Case("al", ARMCC::AL)
2558    .Default(~0U);
2559  if (CC == ~0U)
2560    return MatchOperand_NoMatch;
2561  Parser.Lex(); // Eat the token.
2562
2563  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2564
2565  return MatchOperand_Success;
2566}
2567
2568/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2569/// token must be an Identifier when called, and if it is a coprocessor
2570/// number, the token is eaten and the operand is added to the operand list.
2571ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2572parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2573  SMLoc S = Parser.getTok().getLoc();
2574  const AsmToken &Tok = Parser.getTok();
2575  if (Tok.isNot(AsmToken::Identifier))
2576    return MatchOperand_NoMatch;
2577
2578  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2579  if (Num == -1)
2580    return MatchOperand_NoMatch;
2581
2582  Parser.Lex(); // Eat identifier token.
2583  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2584  return MatchOperand_Success;
2585}
2586
2587/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2588/// token must be an Identifier when called, and if it is a coprocessor
2589/// register, the token is eaten and the operand is added to the operand list.
2590ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2591parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2592  SMLoc S = Parser.getTok().getLoc();
2593  const AsmToken &Tok = Parser.getTok();
2594  if (Tok.isNot(AsmToken::Identifier))
2595    return MatchOperand_NoMatch;
2596
2597  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2598  if (Reg == -1)
2599    return MatchOperand_NoMatch;
2600
2601  Parser.Lex(); // Eat identifier token.
2602  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2603  return MatchOperand_Success;
2604}
2605
2606/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2607/// coproc_option : '{' imm0_255 '}'
2608ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2609parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2610  SMLoc S = Parser.getTok().getLoc();
2611
2612  // If this isn't a '{', this isn't a coprocessor immediate operand.
2613  if (Parser.getTok().isNot(AsmToken::LCurly))
2614    return MatchOperand_NoMatch;
2615  Parser.Lex(); // Eat the '{'
2616
2617  const MCExpr *Expr;
2618  SMLoc Loc = Parser.getTok().getLoc();
2619  if (getParser().ParseExpression(Expr)) {
2620    Error(Loc, "illegal expression");
2621    return MatchOperand_ParseFail;
2622  }
2623  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2624  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2625    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2626    return MatchOperand_ParseFail;
2627  }
2628  int Val = CE->getValue();
2629
2630  // Check for and consume the closing '}'
2631  if (Parser.getTok().isNot(AsmToken::RCurly))
2632    return MatchOperand_ParseFail;
2633  SMLoc E = Parser.getTok().getLoc();
2634  Parser.Lex(); // Eat the '}'
2635
2636  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2637  return MatchOperand_Success;
2638}
2639
2640// For register list parsing, we need to map from raw GPR register numbering
2641// to the enumeration values. The enumeration values aren't sorted by
2642// register number due to our using "sp", "lr" and "pc" as canonical names.
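// For example, getNextRegister(ARM::R12) returns ARM::SP and
// getNextRegister(ARM::LR) returns ARM::PC, matching the numbering used in
// register lists rather than the enum order.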
2643static unsigned getNextRegister(unsigned Reg) {
2644  // If this is a GPR, we need to do it manually, otherwise we can rely
2645  // on the sort ordering of the enumeration since the other reg-classes
2646  // are sane.
2647  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2648    return Reg + 1;
2649  switch(Reg) {
2650  default: assert(0 && "Invalid GPR number!");
2651  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2652  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2653  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2654  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2655  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2656  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2657  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2658  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2659  }
2660}
2661
2662// Return the low-subreg of a given Q register.
2663static unsigned getDRegFromQReg(unsigned QReg) {
2664  switch (QReg) {
2665  default: llvm_unreachable("expected a Q register!");
2666  case ARM::Q0:  return ARM::D0;
2667  case ARM::Q1:  return ARM::D2;
2668  case ARM::Q2:  return ARM::D4;
2669  case ARM::Q3:  return ARM::D6;
2670  case ARM::Q4:  return ARM::D8;
2671  case ARM::Q5:  return ARM::D10;
2672  case ARM::Q6:  return ARM::D12;
2673  case ARM::Q7:  return ARM::D14;
2674  case ARM::Q8:  return ARM::D16;
2675  case ARM::Q9:  return ARM::D18;
2676  case ARM::Q10: return ARM::D20;
2677  case ARM::Q11: return ARM::D22;
2678  case ARM::Q12: return ARM::D24;
2679  case ARM::Q13: return ARM::D26;
2680  case ARM::Q14: return ARM::D28;
2681  case ARM::Q15: return ARM::D30;
2682  }
2683}
2684
2685/// Parse a register list.
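/// Examples: "{r0, r2-r4, lr}" for GPRs, "{d0-d3}" or "{s0, s1}" for VFP.
/// Q registers such as "{q0}" are accepted and expanded to their two D
/// sub-registers.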
2686bool ARMAsmParser::
2687parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2688  assert(Parser.getTok().is(AsmToken::LCurly) &&
2689         "Token is not a Left Curly Brace");
2690  SMLoc S = Parser.getTok().getLoc();
2691  Parser.Lex(); // Eat '{' token.
2692  SMLoc RegLoc = Parser.getTok().getLoc();
2693
2694  // Check the first register in the list to see what register class
2695  // this is a list of.
2696  int Reg = tryParseRegister();
2697  if (Reg == -1)
2698    return Error(RegLoc, "register expected");
2699
2700  // The reglist instructions have at most 16 registers, so reserve
2701  // space for that many.
2702  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2703
2704  // Allow Q regs and just interpret them as the two D sub-registers.
2705  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2706    Reg = getDRegFromQReg(Reg);
2707    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2708    ++Reg;
2709  }
2710  const MCRegisterClass *RC;
2711  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2712    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2713  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2714    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2715  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2716    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2717  else
2718    return Error(RegLoc, "invalid register in register list");
2719
2720  // Store the register.
2721  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2722
2723  // This starts immediately after the first register token in the list,
2724  // so we can see either a comma or a minus (range separator) as a legal
2725  // next token.
2726  while (Parser.getTok().is(AsmToken::Comma) ||
2727         Parser.getTok().is(AsmToken::Minus)) {
2728    if (Parser.getTok().is(AsmToken::Minus)) {
2729      Parser.Lex(); // Eat the minus.
2730      SMLoc EndLoc = Parser.getTok().getLoc();
2731      int EndReg = tryParseRegister();
2732      if (EndReg == -1)
2733        return Error(EndLoc, "register expected");
2734      // Allow Q regs and just interpret them as the two D sub-registers.
2735      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2736        EndReg = getDRegFromQReg(EndReg) + 1;
2737      // If the register is the same as the start reg, there's nothing
2738      // more to do.
2739      if (Reg == EndReg)
2740        continue;
2741      // The register must be in the same register class as the first.
2742      if (!RC->contains(EndReg))
2743        return Error(EndLoc, "invalid register in register list");
2744      // Ranges must go from low to high.
2745      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2746        return Error(EndLoc, "bad range in register list");
2747
2748      // Add all the registers in the range to the register list.
2749      while (Reg != EndReg) {
2750        Reg = getNextRegister(Reg);
2751        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2752      }
2753      continue;
2754    }
2755    Parser.Lex(); // Eat the comma.
2756    RegLoc = Parser.getTok().getLoc();
2757    int OldReg = Reg;
2758    const AsmToken RegTok = Parser.getTok();
2759    Reg = tryParseRegister();
2760    if (Reg == -1)
2761      return Error(RegLoc, "register expected");
2762    // Allow Q regs and just interpret them as the two D sub-registers.
2763    bool isQReg = false;
2764    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2765      Reg = getDRegFromQReg(Reg);
2766      isQReg = true;
2767    }
2768    // The register must be in the same register class as the first.
2769    if (!RC->contains(Reg))
2770      return Error(RegLoc, "invalid register in register list");
2771    // List must be monotonically increasing.
2772    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2773      return Error(RegLoc, "register list not in ascending order");
2774    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2775      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2776              ") in register list");
2777      continue;
2778    }
2779    // VFP register lists must also be contiguous.
2780    // It's OK to use the enumeration values directly here rather, as the
2781    // It's OK to use the enumeration values directly here, as the
2782    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2783        Reg != OldReg + 1)
2784      return Error(RegLoc, "non-contiguous register range");
2785    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2786    if (isQReg)
2787      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2788  }
2789
2790  SMLoc E = Parser.getTok().getLoc();
2791  if (Parser.getTok().isNot(AsmToken::RCurly))
2792    return Error(E, "'}' expected");
2793  Parser.Lex(); // Eat '}' token.
2794
2795  // Push the register list operand.
2796  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2797
2798  // The ARM system instruction variants for LDM/STM have a '^' token here.
2799  if (Parser.getTok().is(AsmToken::Caret)) {
2800    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2801    Parser.Lex(); // Eat '^' token.
2802  }
2803
2804  return false;
2805}
2806
2807// Helper function to parse the lane index for vector lists.
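// "[]" selects all lanes ("Dn[]" syntax), "[<imm>]" selects a single lane,
// and the absence of a bracket means no lane specification (NoLanes).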
2808ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2809parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2810  Index = 0; // Always return a defined index value.
2811  if (Parser.getTok().is(AsmToken::LBrac)) {
2812    Parser.Lex(); // Eat the '['.
2813    if (Parser.getTok().is(AsmToken::RBrac)) {
2814      // "Dn[]" is the 'all lanes' syntax.
2815      LaneKind = AllLanes;
2816      Parser.Lex(); // Eat the ']'.
2817      return MatchOperand_Success;
2818    }
2819    const MCExpr *LaneIndex;
2820    SMLoc Loc = Parser.getTok().getLoc();
2821    if (getParser().ParseExpression(LaneIndex)) {
2822      Error(Loc, "illegal expression");
2823      return MatchOperand_ParseFail;
2824    }
2825    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2826    if (!CE) {
2827      Error(Loc, "lane index must be empty or an integer");
2828      return MatchOperand_ParseFail;
2829    }
2830    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2831      Error(Parser.getTok().getLoc(), "']' expected");
2832      return MatchOperand_ParseFail;
2833    }
2834    Parser.Lex(); // Eat the ']'.
2835    int64_t Val = CE->getValue();
2836
2837    // FIXME: Make this range check context sensitive for .8, .16, .32.
2838    if (Val < 0 || Val > 7) {
2839      Error(Parser.getTok().getLoc(), "lane index out of range");
2840      return MatchOperand_ParseFail;
2841    }
2842    Index = Val;
2843    LaneKind = IndexedLane;
2844    return MatchOperand_Success;
2845  }
2846  LaneKind = NoLanes;
2847  return MatchOperand_Success;
2848}
2849
2850// Parse a vector register list.
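// Examples: "{d0, d1}", "{d0[], d1[]}", "{d0[2]}", or, as a gas-style
// extension handled below, a bare "d0" or "q1".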
2851ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2852parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2853  VectorLaneTy LaneKind;
2854  unsigned LaneIndex;
2855  SMLoc S = Parser.getTok().getLoc();
2856  // As an extension (to match gas), support a plain D register or Q register
2857  // (without enclosing curly braces) as a single or double entry list,
2858  // respectively.
2859  if (Parser.getTok().is(AsmToken::Identifier)) {
2860    int Reg = tryParseRegister();
2861    if (Reg == -1)
2862      return MatchOperand_NoMatch;
2863    SMLoc E = Parser.getTok().getLoc();
2864    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2865      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2866      if (Res != MatchOperand_Success)
2867        return Res;
2868      switch (LaneKind) {
2869      default:
2870        assert(0 && "unexpected lane kind!");
2871      case NoLanes:
2872        E = Parser.getTok().getLoc();
2873        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2874        break;
2875      case AllLanes:
2876        E = Parser.getTok().getLoc();
2877        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2878                                                                S, E));
2879        break;
2880      case IndexedLane:
2881        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2882                                                               LaneIndex,
2883                                                               false, S, E));
2884        break;
2885      }
2886      return MatchOperand_Success;
2887    }
2888    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2889      Reg = getDRegFromQReg(Reg);
2890      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2891      if (Res != MatchOperand_Success)
2892        return Res;
2893      switch (LaneKind) {
2894      default:
2895        assert(0 && "unexpected lane kind!");
2896      case NoLanes:
2897        E = Parser.getTok().getLoc();
2898        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2899        break;
2900      case AllLanes:
2901        E = Parser.getTok().getLoc();
2902        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2903                                                                S, E));
2904        break;
2905      case IndexedLane:
2906        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2907                                                               LaneIndex,
2908                                                               false, S, E));
2909        break;
2910      }
2911      return MatchOperand_Success;
2912    }
2913    Error(S, "vector register expected");
2914    return MatchOperand_ParseFail;
2915  }
2916
2917  if (Parser.getTok().isNot(AsmToken::LCurly))
2918    return MatchOperand_NoMatch;
2919
2920  Parser.Lex(); // Eat '{' token.
2921  SMLoc RegLoc = Parser.getTok().getLoc();
2922
2923  int Reg = tryParseRegister();
2924  if (Reg == -1) {
2925    Error(RegLoc, "register expected");
2926    return MatchOperand_ParseFail;
2927  }
2928  unsigned Count = 1;
2929  int Spacing = 0;
2930  unsigned FirstReg = Reg;
2931  // The list is of D registers, but we also allow Q regs and just interpret
2932  // them as the two D sub-registers.
2933  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2934    FirstReg = Reg = getDRegFromQReg(Reg);
2935    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2936                 // it's ambiguous with four-register single spaced.
2937    ++Reg;
2938    ++Count;
2939  }
2940  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2941    return MatchOperand_ParseFail;
2942
2943  while (Parser.getTok().is(AsmToken::Comma) ||
2944         Parser.getTok().is(AsmToken::Minus)) {
2945    if (Parser.getTok().is(AsmToken::Minus)) {
2946      if (!Spacing)
2947        Spacing = 1; // Register range implies a single spaced list.
2948      else if (Spacing == 2) {
2949        Error(Parser.getTok().getLoc(),
2950              "sequential registers in double spaced list");
2951        return MatchOperand_ParseFail;
2952      }
2953      Parser.Lex(); // Eat the minus.
2954      SMLoc EndLoc = Parser.getTok().getLoc();
2955      int EndReg = tryParseRegister();
2956      if (EndReg == -1) {
2957        Error(EndLoc, "register expected");
2958        return MatchOperand_ParseFail;
2959      }
2960      // Allow Q regs and just interpret them as the two D sub-registers.
2961      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2962        EndReg = getDRegFromQReg(EndReg) + 1;
2963      // If the register is the same as the start reg, there's nothing
2964      // more to do.
2965      if (Reg == EndReg)
2966        continue;
2967      // The register must be in the same register class as the first.
2968      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2969        Error(EndLoc, "invalid register in register list");
2970        return MatchOperand_ParseFail;
2971      }
2972      // Ranges must go from low to high.
2973      if (Reg > EndReg) {
2974        Error(EndLoc, "bad range in register list");
2975        return MatchOperand_ParseFail;
2976      }
2977      // Parse the lane specifier if present.
2978      VectorLaneTy NextLaneKind;
2979      unsigned NextLaneIndex;
2980      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2981        return MatchOperand_ParseFail;
2982      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2983        Error(EndLoc, "mismatched lane index in register list");
2984        return MatchOperand_ParseFail;
2985      }
2986      EndLoc = Parser.getTok().getLoc();
2987
2988      // Add all the registers in the range to the register list.
2989      Count += EndReg - Reg;
2990      Reg = EndReg;
2991      continue;
2992    }
2993    Parser.Lex(); // Eat the comma.
2994    RegLoc = Parser.getTok().getLoc();
2995    int OldReg = Reg;
2996    Reg = tryParseRegister();
2997    if (Reg == -1) {
2998      Error(RegLoc, "register expected");
2999      return MatchOperand_ParseFail;
3000    }
3001    // Vector register lists must be contiguous.
3002    // It's OK to use the enumeration values directly here, as the
3003    // VFP register classes have the enum sorted properly.
3004    //
3005    // The list is of D registers, but we also allow Q regs and just interpret
3006    // them as the two D sub-registers.
3007    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3008      if (!Spacing)
3009        Spacing = 1; // Register range implies a single spaced list.
3010      else if (Spacing == 2) {
3011        Error(RegLoc,
3012              "invalid register in double-spaced list (must be 'D' register)");
3013        return MatchOperand_ParseFail;
3014      }
3015      Reg = getDRegFromQReg(Reg);
3016      if (Reg != OldReg + 1) {
3017        Error(RegLoc, "non-contiguous register range");
3018        return MatchOperand_ParseFail;
3019      }
3020      ++Reg;
3021      Count += 2;
3022      // Parse the lane specifier if present.
3023      VectorLaneTy NextLaneKind;
3024      unsigned NextLaneIndex;
3025      SMLoc EndLoc = Parser.getTok().getLoc();
3026      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3027        return MatchOperand_ParseFail;
3028      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3029        Error(EndLoc, "mismatched lane index in register list");
3030        return MatchOperand_ParseFail;
3031      }
3032      continue;
3033    }
3034    // Normal D register.
3035    // Figure out the register spacing (single or double) of the list if
3036    // we don't know it already.
3037    if (!Spacing)
3038      Spacing = 1 + (Reg == OldReg + 2);
3039
3040    // Just check that it's contiguous and keep going.
3041    if (Reg != OldReg + Spacing) {
3042      Error(RegLoc, "non-contiguous register range");
3043      return MatchOperand_ParseFail;
3044    }
3045    ++Count;
3046    // Parse the lane specifier if present.
3047    VectorLaneTy NextLaneKind;
3048    unsigned NextLaneIndex;
3049    SMLoc EndLoc = Parser.getTok().getLoc();
3050    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3051      return MatchOperand_ParseFail;
3052    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3053      Error(EndLoc, "mismatched lane index in register list");
3054      return MatchOperand_ParseFail;
3055    }
3056  }
3057
3058  SMLoc E = Parser.getTok().getLoc();
3059  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3060    Error(E, "'}' expected");
3061    return MatchOperand_ParseFail;
3062  }
3063  Parser.Lex(); // Eat '}' token.
3064
3065  switch (LaneKind) {
3066  default:
3067    assert(0 && "unexpected lane kind in register list.");
3068  case NoLanes:
3069    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3070                                                    (Spacing == 2), S, E));
3071    break;
3072  case AllLanes:
3073    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3074                                                            (Spacing == 2),
3075                                                            S, E));
3076    break;
3077  case IndexedLane:
3078    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3079                                                           LaneIndex,
3080                                                           (Spacing == 2),
3081                                                           S, E));
3082    break;
3083  }
3084  return MatchOperand_Success;
3085}
3086
3087/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
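///
/// Illustrative uses (option names taken from the table below):
///   dmb ish
///   dsb sy
///   dmb oshst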
3088ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3089parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3090  SMLoc S = Parser.getTok().getLoc();
3091  const AsmToken &Tok = Parser.getTok();
3092  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3093  StringRef OptStr = Tok.getString();
3094
3095  unsigned Opt = StringSwitch<unsigned>(OptStr)
3096    .Case("sy",    ARM_MB::SY)
3097    .Case("st",    ARM_MB::ST)
3098    .Case("sh",    ARM_MB::ISH)
3099    .Case("ish",   ARM_MB::ISH)
3100    .Case("shst",  ARM_MB::ISHST)
3101    .Case("ishst", ARM_MB::ISHST)
3102    .Case("nsh",   ARM_MB::NSH)
3103    .Case("un",    ARM_MB::NSH)
3104    .Case("nshst", ARM_MB::NSHST)
3105    .Case("unst",  ARM_MB::NSHST)
3106    .Case("osh",   ARM_MB::OSH)
3107    .Case("oshst", ARM_MB::OSHST)
3108    .Default(~0U);
3109
3110  if (Opt == ~0U)
3111    return MatchOperand_NoMatch;
3112
3113  Parser.Lex(); // Eat identifier token.
3114  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3115  return MatchOperand_Success;
3116}
3117
3118/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
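///
/// Illustrative uses (flag letters per the switch below):
///   cpsie if
///   cpsid aif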
3119ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3120parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3121  SMLoc S = Parser.getTok().getLoc();
3122  const AsmToken &Tok = Parser.getTok();
3123  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3124  StringRef IFlagsStr = Tok.getString();
3125
3126  // An iflags string of "none" is interpreted to mean that none of the AIF
3127  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3128  unsigned IFlags = 0;
3129  if (IFlagsStr != "none") {
3130    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3131      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3132        .Case("a", ARM_PROC::A)
3133        .Case("i", ARM_PROC::I)
3134        .Case("f", ARM_PROC::F)
3135        .Default(~0U);
3136
3137      // If some specific iflag is already set, it means that some letter is
3138      // present more than once, this is not acceptable.
3139      if (Flag == ~0U || (IFlags & Flag))
3140        return MatchOperand_NoMatch;
3141
3142      IFlags |= Flag;
3143    }
3144  }
3145
3146  Parser.Lex(); // Eat identifier token.
3147  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3148  return MatchOperand_Success;
3149}
3150
3151/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
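///
/// Illustrative uses (spellings per the tables below):
///   msr apsr_nzcvq, r0     @ A/R-profile APSR flags
///   msr cpsr_fc, r1
///   msr primask, r0        @ M-class special register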
3152ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3153parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3154  SMLoc S = Parser.getTok().getLoc();
3155  const AsmToken &Tok = Parser.getTok();
3156  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3157  StringRef Mask = Tok.getString();
3158
3159  if (isMClass()) {
3160    // See ARMv6-M 10.1.1
3161    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3162      .Case("apsr", 0)
3163      .Case("iapsr", 1)
3164      .Case("eapsr", 2)
3165      .Case("xpsr", 3)
3166      .Case("ipsr", 5)
3167      .Case("epsr", 6)
3168      .Case("iepsr", 7)
3169      .Case("msp", 8)
3170      .Case("psp", 9)
3171      .Case("primask", 16)
3172      .Case("basepri", 17)
3173      .Case("basepri_max", 18)
3174      .Case("faultmask", 19)
3175      .Case("control", 20)
3176      .Default(~0U);
3177
3178    if (FlagsVal == ~0U)
3179      return MatchOperand_NoMatch;
3180
3181    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3182      // basepri, basepri_max and faultmask only valid for V7m.
3183      return MatchOperand_NoMatch;
3184
3185    Parser.Lex(); // Eat identifier token.
3186    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3187    return MatchOperand_Success;
3188  }
3189
3190  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3191  size_t Start = 0, Next = Mask.find('_');
3192  StringRef Flags = "";
3193  std::string SpecReg = Mask.slice(Start, Next).lower();
3194  if (Next != StringRef::npos)
3195    Flags = Mask.slice(Next+1, Mask.size());
3196
3197  // FlagsVal contains the complete mask:
3198  // 3-0: Mask
3199  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3200  unsigned FlagsVal = 0;
3201
3202  if (SpecReg == "apsr") {
3203    FlagsVal = StringSwitch<unsigned>(Flags)
3204    .Case("nzcvq",  0x8) // same as CPSR_f
3205    .Case("g",      0x4) // same as CPSR_s
3206    .Case("nzcvqg", 0xc) // same as CPSR_fs
3207    .Default(~0U);
3208
3209    if (FlagsVal == ~0U) {
3210      if (!Flags.empty())
3211        return MatchOperand_NoMatch;
3212      else
3213        FlagsVal = 8; // No flag
3214    }
3215  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3216    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3217      Flags = "fc";
3218    for (int i = 0, e = Flags.size(); i != e; ++i) {
3219      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3220      .Case("c", 1)
3221      .Case("x", 2)
3222      .Case("s", 4)
3223      .Case("f", 8)
3224      .Default(~0U);
3225
3226      // If some specific flag is already set, it means that some letter is
3227      // present more than once, this is not acceptable.
3228      if (FlagsVal == ~0U || (FlagsVal & Flag))
3229        return MatchOperand_NoMatch;
3230      FlagsVal |= Flag;
3231    }
3232  } else // No match for special register.
3233    return MatchOperand_NoMatch;
3234
3235  // Special register without flags is NOT equivalent to "fc" flags.
3236  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3237  // two lines would enable gas compatibility at the expense of breaking
3238  // round-tripping.
3239  //
3240  // if (!FlagsVal)
3241  //  FlagsVal = 0x9;
3242
3243  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3244  if (SpecReg == "spsr")
3245    FlagsVal |= 16;
3246
3247  Parser.Lex(); // Eat identifier token.
3248  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3249  return MatchOperand_Success;
3250}
3251
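/// parsePKHImm - Parse the shift operand of the PKH instructions, e.g. the
/// "lsl #8" in "pkhbt r0, r1, r2, lsl #8" (illustrative; the shift name and
/// the [Low,High] range are supplied by the caller).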
3252ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3253parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3254            int Low, int High) {
3255  const AsmToken &Tok = Parser.getTok();
3256  if (Tok.isNot(AsmToken::Identifier)) {
3257    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3258    return MatchOperand_ParseFail;
3259  }
3260  StringRef ShiftName = Tok.getString();
3261  std::string LowerOp = Op.lower();
3262  std::string UpperOp = Op.upper();
3263  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3264    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3265    return MatchOperand_ParseFail;
3266  }
3267  Parser.Lex(); // Eat shift type token.
3268
3269  // There must be a '#' and a shift amount.
3270  if (Parser.getTok().isNot(AsmToken::Hash) &&
3271      Parser.getTok().isNot(AsmToken::Dollar)) {
3272    Error(Parser.getTok().getLoc(), "'#' expected");
3273    return MatchOperand_ParseFail;
3274  }
3275  Parser.Lex(); // Eat hash token.
3276
3277  const MCExpr *ShiftAmount;
3278  SMLoc Loc = Parser.getTok().getLoc();
3279  if (getParser().ParseExpression(ShiftAmount)) {
3280    Error(Loc, "illegal expression");
3281    return MatchOperand_ParseFail;
3282  }
3283  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3284  if (!CE) {
3285    Error(Loc, "constant expression expected");
3286    return MatchOperand_ParseFail;
3287  }
3288  int Val = CE->getValue();
3289  if (Val < Low || Val > High) {
3290    Error(Loc, "immediate value out of range");
3291    return MatchOperand_ParseFail;
3292  }
3293
3294  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3295
3296  return MatchOperand_Success;
3297}
3298
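/// parseSetEndImm - Parse the endianness operand of SETEND, i.e. the "be" in
/// "setend be" or the "le" in "setend le".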
3299ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3300parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3301  const AsmToken &Tok = Parser.getTok();
3302  SMLoc S = Tok.getLoc();
3303  if (Tok.isNot(AsmToken::Identifier)) {
3304    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3305    return MatchOperand_ParseFail;
3306  }
3307  int Val = StringSwitch<int>(Tok.getString())
3308    .Case("be", 1)
3309    .Case("le", 0)
3310    .Default(-1);
3311  Parser.Lex(); // Eat the token.
3312
3313  if (Val == -1) {
3314    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3315    return MatchOperand_ParseFail;
3316  }
3317  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3318                                                                  getContext()),
3319                                           S, Parser.getTok().getLoc()));
3320  return MatchOperand_Success;
3321}
3322
3323/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3324/// instructions. Legal values are:
3325///     lsl #n  'n' in [0,31]
3326///     asr #n  'n' in [1,32]
3327///             n == 32 encoded as n == 0.
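///
/// Illustrative use: the "lsl #4" in "ssat r0, #8, r1, lsl #4".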
3328ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3329parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3330  const AsmToken &Tok = Parser.getTok();
3331  SMLoc S = Tok.getLoc();
3332  if (Tok.isNot(AsmToken::Identifier)) {
3333    Error(S, "shift operator 'asr' or 'lsl' expected");
3334    return MatchOperand_ParseFail;
3335  }
3336  StringRef ShiftName = Tok.getString();
3337  bool isASR;
3338  if (ShiftName == "lsl" || ShiftName == "LSL")
3339    isASR = false;
3340  else if (ShiftName == "asr" || ShiftName == "ASR")
3341    isASR = true;
3342  else {
3343    Error(S, "shift operator 'asr' or 'lsl' expected");
3344    return MatchOperand_ParseFail;
3345  }
3346  Parser.Lex(); // Eat the operator.
3347
3348  // A '#' and a shift amount.
3349  if (Parser.getTok().isNot(AsmToken::Hash) &&
3350      Parser.getTok().isNot(AsmToken::Dollar)) {
3351    Error(Parser.getTok().getLoc(), "'#' expected");
3352    return MatchOperand_ParseFail;
3353  }
3354  Parser.Lex(); // Eat hash token.
3355
3356  const MCExpr *ShiftAmount;
3357  SMLoc E = Parser.getTok().getLoc();
3358  if (getParser().ParseExpression(ShiftAmount)) {
3359    Error(E, "malformed shift expression");
3360    return MatchOperand_ParseFail;
3361  }
3362  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3363  if (!CE) {
3364    Error(E, "shift amount must be an immediate");
3365    return MatchOperand_ParseFail;
3366  }
3367
3368  int64_t Val = CE->getValue();
3369  if (isASR) {
3370    // Shift amount must be in [1,32]
3371    if (Val < 1 || Val > 32) {
3372      Error(E, "'asr' shift amount must be in range [1,32]");
3373      return MatchOperand_ParseFail;
3374    }
3375    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3376    if (isThumb() && Val == 32) {
3377      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3378      return MatchOperand_ParseFail;
3379    }
3380    if (Val == 32) Val = 0;
3381  } else {
3382    // Shift amount must be in [0,31]
3383    if (Val < 0 || Val > 31) {
3384      Error(E, "'lsl' shift amount must be in range [0,31]");
3385      return MatchOperand_ParseFail;
3386    }
3387  }
3388
3389  E = Parser.getTok().getLoc();
3390  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3391
3392  return MatchOperand_Success;
3393}
3394
3395/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3396/// of instructions. Legal values are:
3397///     ror #n  'n' in {0, 8, 16, 24}
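///
/// Illustrative use: the "ror #16" in "sxtb r0, r1, ror #16".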
3398ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3399parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3400  const AsmToken &Tok = Parser.getTok();
3401  SMLoc S = Tok.getLoc();
3402  if (Tok.isNot(AsmToken::Identifier))
3403    return MatchOperand_NoMatch;
3404  StringRef ShiftName = Tok.getString();
3405  if (ShiftName != "ror" && ShiftName != "ROR")
3406    return MatchOperand_NoMatch;
3407  Parser.Lex(); // Eat the operator.
3408
3409  // A '#' and a rotate amount.
3410  if (Parser.getTok().isNot(AsmToken::Hash) &&
3411      Parser.getTok().isNot(AsmToken::Dollar)) {
3412    Error(Parser.getTok().getLoc(), "'#' expected");
3413    return MatchOperand_ParseFail;
3414  }
3415  Parser.Lex(); // Eat hash token.
3416
3417  const MCExpr *ShiftAmount;
3418  SMLoc E = Parser.getTok().getLoc();
3419  if (getParser().ParseExpression(ShiftAmount)) {
3420    Error(E, "malformed rotate expression");
3421    return MatchOperand_ParseFail;
3422  }
3423  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3424  if (!CE) {
3425    Error(E, "rotate amount must be an immediate");
3426    return MatchOperand_ParseFail;
3427  }
3428
3429  int64_t Val = CE->getValue();
3430  // Rotate amount must be in {0, 8, 16, 24}; accepting 0 is an undocumented
3431  // extension, as zero is normally represented in asm by omitting the rotate
3432  // operand entirely.
3433  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3434    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3435    return MatchOperand_ParseFail;
3436  }
3437
3438  E = Parser.getTok().getLoc();
3439  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3440
3441  return MatchOperand_Success;
3442}
3443
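/// parseBitfield - Parse the lsb/width immediate pair used by the bitfield
/// instructions, e.g. the "#8, #4" in "bfi r0, r1, #8, #4" (illustrative).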
3444ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3445parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3446  SMLoc S = Parser.getTok().getLoc();
3447  // The bitfield descriptor is really two operands, the LSB and the width.
3448  if (Parser.getTok().isNot(AsmToken::Hash) &&
3449      Parser.getTok().isNot(AsmToken::Dollar)) {
3450    Error(Parser.getTok().getLoc(), "'#' expected");
3451    return MatchOperand_ParseFail;
3452  }
3453  Parser.Lex(); // Eat hash token.
3454
3455  const MCExpr *LSBExpr;
3456  SMLoc E = Parser.getTok().getLoc();
3457  if (getParser().ParseExpression(LSBExpr)) {
3458    Error(E, "malformed immediate expression");
3459    return MatchOperand_ParseFail;
3460  }
3461  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3462  if (!CE) {
3463    Error(E, "'lsb' operand must be an immediate");
3464    return MatchOperand_ParseFail;
3465  }
3466
3467  int64_t LSB = CE->getValue();
3468  // The LSB must be in the range [0,31]
3469  if (LSB < 0 || LSB > 31) {
3470    Error(E, "'lsb' operand must be in the range [0,31]");
3471    return MatchOperand_ParseFail;
3472  }
3473  E = Parser.getTok().getLoc();
3474
3475  // Expect another immediate operand.
3476  if (Parser.getTok().isNot(AsmToken::Comma)) {
3477    Error(Parser.getTok().getLoc(), "too few operands");
3478    return MatchOperand_ParseFail;
3479  }
3480  Parser.Lex(); // Eat the comma.
3481  if (Parser.getTok().isNot(AsmToken::Hash) &&
3482      Parser.getTok().isNot(AsmToken::Dollar)) {
3483    Error(Parser.getTok().getLoc(), "'#' expected");
3484    return MatchOperand_ParseFail;
3485  }
3486  Parser.Lex(); // Eat hash token.
3487
3488  const MCExpr *WidthExpr;
3489  if (getParser().ParseExpression(WidthExpr)) {
3490    Error(E, "malformed immediate expression");
3491    return MatchOperand_ParseFail;
3492  }
3493  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3494  if (!CE) {
3495    Error(E, "'width' operand must be an immediate");
3496    return MatchOperand_ParseFail;
3497  }
3498
3499  int64_t Width = CE->getValue();
3500  // The width must be in the range [1,32-lsb]
3501  if (Width < 1 || Width > 32 - LSB) {
3502    Error(E, "'width' operand must be in the range [1,32-lsb]");
3503    return MatchOperand_ParseFail;
3504  }
3505  E = Parser.getTok().getLoc();
3506
3507  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3508
3509  return MatchOperand_Success;
3510}
3511
3512ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3513parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3514  // Check for a post-index addressing register operand. Specifically:
3515  // postidx_reg := '+' register {, shift}
3516  //              | '-' register {, shift}
3517  //              | register {, shift}
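  //
  // Illustrative use: the "r2, lsl #2" in "str r0, [r1], r2, lsl #2", or the
  // "-r2" in "str r0, [r1], -r2".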
3518
3519  // This method must return MatchOperand_NoMatch without consuming any tokens
3520  // in the case where there is no match, as other alternatives are tried by
3521  // other parse methods.
3522  AsmToken Tok = Parser.getTok();
3523  SMLoc S = Tok.getLoc();
3524  bool haveEaten = false;
3525  bool isAdd = true;
3526  int Reg = -1;
3527  if (Tok.is(AsmToken::Plus)) {
3528    Parser.Lex(); // Eat the '+' token.
3529    haveEaten = true;
3530  } else if (Tok.is(AsmToken::Minus)) {
3531    Parser.Lex(); // Eat the '-' token.
3532    isAdd = false;
3533    haveEaten = true;
3534  }
3535  if (Parser.getTok().is(AsmToken::Identifier))
3536    Reg = tryParseRegister();
3537  if (Reg == -1) {
3538    if (!haveEaten)
3539      return MatchOperand_NoMatch;
3540    Error(Parser.getTok().getLoc(), "register expected");
3541    return MatchOperand_ParseFail;
3542  }
3543  SMLoc E = Parser.getTok().getLoc();
3544
3545  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3546  unsigned ShiftImm = 0;
3547  if (Parser.getTok().is(AsmToken::Comma)) {
3548    Parser.Lex(); // Eat the ','.
3549    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3550      return MatchOperand_ParseFail;
3551  }
3552
3553  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3554                                                  ShiftImm, S, E));
3555
3556  return MatchOperand_Success;
3557}
3558
3559ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3560parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3561  // Check for a post-index addressing register operand. Specifically:
3562  // am3offset := '+' register
3563  //              | '-' register
3564  //              | register
3565  //              | # imm
3566  //              | # + imm
3567  //              | # - imm
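  //
  // Illustrative use: the "#8" in "ldrd r0, r1, [r2], #8", or the "-r3" in
  // "ldrh r0, [r1], -r3".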
3568
3569  // This method must return MatchOperand_NoMatch without consuming any tokens
3570  // in the case where there is no match, as other alternatives are tried by
3571  // other parse methods.
3572  AsmToken Tok = Parser.getTok();
3573  SMLoc S = Tok.getLoc();
3574
3575  // Do immediates first, as we always parse those if we have a '#'.
3576  if (Parser.getTok().is(AsmToken::Hash) ||
3577      Parser.getTok().is(AsmToken::Dollar)) {
3578    Parser.Lex(); // Eat the '#'.
3579    // Explicitly look for a '-', as we need to encode negative zero
3580    // differently.
3581    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3582    const MCExpr *Offset;
3583    if (getParser().ParseExpression(Offset))
3584      return MatchOperand_ParseFail;
3585    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3586    if (!CE) {
3587      Error(S, "constant expression expected");
3588      return MatchOperand_ParseFail;
3589    }
3590    SMLoc E = Tok.getLoc();
3591    // Negative zero is encoded as the flag value INT32_MIN.
3592    int32_t Val = CE->getValue();
3593    if (isNegative && Val == 0)
3594      Val = INT32_MIN;
3595
3596    Operands.push_back(
3597      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3598
3599    return MatchOperand_Success;
3600  }
3601
3602
3603  bool haveEaten = false;
3604  bool isAdd = true;
3605  int Reg = -1;
3606  if (Tok.is(AsmToken::Plus)) {
3607    Parser.Lex(); // Eat the '+' token.
3608    haveEaten = true;
3609  } else if (Tok.is(AsmToken::Minus)) {
3610    Parser.Lex(); // Eat the '-' token.
3611    isAdd = false;
3612    haveEaten = true;
3613  }
3614  if (Parser.getTok().is(AsmToken::Identifier))
3615    Reg = tryParseRegister();
3616  if (Reg == -1) {
3617    if (!haveEaten)
3618      return MatchOperand_NoMatch;
3619    Error(Parser.getTok().getLoc(), "register expected");
3620    return MatchOperand_ParseFail;
3621  }
3622  SMLoc E = Parser.getTok().getLoc();
3623
3624  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3625                                                  0, S, E));
3626
3627  return MatchOperand_Success;
3628}
3629
3630/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3631/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3632/// when they refer to multiple MIOperands inside a single one.
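///
/// For the pre-indexed LDRD form handled here, the parsed operand layout is
/// assumed to be: Operands[0] = mnemonic token, Operands[1] = predicate,
/// Operands[2]/[3] = Rt/Rt2, and Operands[4] = the memory operand, matching
/// the indices used below.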
3633bool ARMAsmParser::
3634cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3635             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3636  // Rt, Rt2
3637  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3638  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3639  // Create a writeback register dummy placeholder.
3640  Inst.addOperand(MCOperand::CreateReg(0));
3641  // addr
3642  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3643  // pred
3644  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3645  return true;
3646}
3647
3648/// cvtT2StrdPre - Convert parsed operands to MCInst.
3649/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3650/// when they refer to multiple MIOperands inside a single one.
3651bool ARMAsmParser::
3652cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3653             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3654  // Create a writeback register dummy placeholder.
3655  Inst.addOperand(MCOperand::CreateReg(0));
3656  // Rt, Rt2
3657  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3658  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3659  // addr
3660  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3661  // pred
3662  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3663  return true;
3664}
3665
3666/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3667/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3668/// when they refer to multiple MIOperands inside a single one.
3669bool ARMAsmParser::
3670cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3671                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3672  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3673
3674  // Create a writeback register dummy placeholder.
3675  Inst.addOperand(MCOperand::CreateImm(0));
3676
3677  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3678  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3679  return true;
3680}
3681
3682/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3683/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3684/// when they refer to multiple MIOperands inside a single one.
3685bool ARMAsmParser::
3686cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3687                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3688  // Create a writeback register dummy placeholder.
3689  Inst.addOperand(MCOperand::CreateImm(0));
3690  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3691  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3692  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3693  return true;
3694}
3695
3696/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3697/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3698/// when they refer to multiple MIOperands inside a single one.
3699bool ARMAsmParser::
3700cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3701                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3702  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3703
3704  // Create a writeback register dummy placeholder.
3705  Inst.addOperand(MCOperand::CreateImm(0));
3706
3707  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3708  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3709  return true;
3710}
3711
3712/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3713/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3714/// when they refer to multiple MIOperands inside a single one.
3715bool ARMAsmParser::
3716cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3717                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3718  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3719
3720  // Create a writeback register dummy placeholder.
3721  Inst.addOperand(MCOperand::CreateImm(0));
3722
3723  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3724  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3725  return true;
3726}
3727
3728
3729/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3730/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3731/// when they refer to multiple MIOperands inside a single one.
3732bool ARMAsmParser::
3733cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3734                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3735  // Create a writeback register dummy placeholder.
3736  Inst.addOperand(MCOperand::CreateImm(0));
3737  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3738  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3739  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3740  return true;
3741}
3742
3743/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3744/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3745/// when they refer to multiple MIOperands inside a single one.
3746bool ARMAsmParser::
3747cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3748                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3749  // Create a writeback register dummy placeholder.
3750  Inst.addOperand(MCOperand::CreateImm(0));
3751  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3752  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3753  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3754  return true;
3755}
3756
3757/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3758/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3759/// when they refer to multiple MIOperands inside a single one.
3760bool ARMAsmParser::
3761cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3762                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3763  // Create a writeback register dummy placeholder.
3764  Inst.addOperand(MCOperand::CreateImm(0));
3765  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3766  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3767  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3768  return true;
3769}
3770
3771/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3772/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3773/// when they refer to multiple MIOperands inside a single one.
3774bool ARMAsmParser::
3775cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3776                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3777  // Rt
3778  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3779  // Create a writeback register dummy placeholder.
3780  Inst.addOperand(MCOperand::CreateImm(0));
3781  // addr
3782  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3783  // offset
3784  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3785  // pred
3786  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3787  return true;
3788}
3789
3790/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3791/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3792/// when they refer to multiple MIOperands inside a single one.
3793bool ARMAsmParser::
3794cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3795                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3796  // Rt
3797  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3798  // Create a writeback register dummy placeholder.
3799  Inst.addOperand(MCOperand::CreateImm(0));
3800  // addr
3801  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3802  // offset
3803  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3804  // pred
3805  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3806  return true;
3807}
3808
3809/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3810/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3811/// when they refer to multiple MIOperands inside a single one.
3812bool ARMAsmParser::
3813cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3814                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3815  // Create a writeback register dummy placeholder.
3816  Inst.addOperand(MCOperand::CreateImm(0));
3817  // Rt
3818  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3819  // addr
3820  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3821  // offset
3822  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3823  // pred
3824  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3825  return true;
3826}
3827
3828/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3829/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3830/// when they refer to multiple MIOperands inside a single one.
3831bool ARMAsmParser::
3832cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3833                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3834  // Create a writeback register dummy placeholder.
3835  Inst.addOperand(MCOperand::CreateImm(0));
3836  // Rt
3837  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3838  // addr
3839  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3840  // offset
3841  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3842  // pred
3843  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3844  return true;
3845}
3846
3847/// cvtLdrdPre - Convert parsed operands to MCInst.
3848/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3849/// when they refer to multiple MIOperands inside a single one.
3850bool ARMAsmParser::
3851cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3852           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3853  // Rt, Rt2
3854  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3855  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3856  // Create a writeback register dummy placeholder.
3857  Inst.addOperand(MCOperand::CreateImm(0));
3858  // addr
3859  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3860  // pred
3861  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3862  return true;
3863}
3864
3865/// cvtStrdPre - Convert parsed operands to MCInst.
3866/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3867/// when they refer to multiple MIOperands inside a single one.
3868bool ARMAsmParser::
3869cvtStrdPre(MCInst &Inst, unsigned Opcode,
3870           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3871  // Create a writeback register dummy placeholder.
3872  Inst.addOperand(MCOperand::CreateImm(0));
3873  // Rt, Rt2
3874  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3875  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3876  // addr
3877  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3878  // pred
3879  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3880  return true;
3881}
3882
3883/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3884/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3885/// when they refer to multiple MIOperands inside a single one.
3886bool ARMAsmParser::
3887cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3888                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3889  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3890  // Create a writeback register dummy placeholder.
3891  Inst.addOperand(MCOperand::CreateImm(0));
3892  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3893  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3894  return true;
3895}
3896
3897/// cvtThumbMultiply - Convert parsed operands to MCInst.
3898/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3899/// when they refer to multiple MIOperands inside a single one.
3900bool ARMAsmParser::
3901cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3902           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3903  // One of the source operands must be the same register as the destination
3904  // operand.
3905  if (Operands.size() == 6 &&
3906      (((ARMOperand*)Operands[3])->getReg() !=
3907       ((ARMOperand*)Operands[5])->getReg()) &&
3908      (((ARMOperand*)Operands[3])->getReg() !=
3909       ((ARMOperand*)Operands[4])->getReg())) {
3910    Error(Operands[3]->getStartLoc(),
3911          "destination register must match source register");
3912    return false;
3913  }
3914  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3915  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3916  // If we have a three-operand form, make sure to set Rn to be the operand
3917  // that isn't the same as Rd.
3918  unsigned RegOp = 4;
3919  if (Operands.size() == 6 &&
3920      ((ARMOperand*)Operands[4])->getReg() ==
3921        ((ARMOperand*)Operands[3])->getReg())
3922    RegOp = 5;
3923  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3924  Inst.addOperand(Inst.getOperand(0));
3925  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3926
3927  return true;
3928}
3929
3930bool ARMAsmParser::
3931cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3932              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3933  // Vd
3934  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3935  // Create a writeback register dummy placeholder.
3936  Inst.addOperand(MCOperand::CreateImm(0));
3937  // Vn
3938  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3939  // pred
3940  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3941  return true;
3942}
3943
3944bool ARMAsmParser::
3945cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3946                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3947  // Vd
3948  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3949  // Create a writeback register dummy placeholder.
3950  Inst.addOperand(MCOperand::CreateImm(0));
3951  // Vn
3952  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3953  // Vm
3954  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3955  // pred
3956  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3957  return true;
3958}
3959
3960bool ARMAsmParser::
3961cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3962              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3963  // Create a writeback register dummy placeholder.
3964  Inst.addOperand(MCOperand::CreateImm(0));
3965  // Vn
3966  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3967  // Vt
3968  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3969  // pred
3970  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3971  return true;
3972}
3973
3974bool ARMAsmParser::
3975cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3976                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3977  // Create a writeback register dummy placeholder.
3978  Inst.addOperand(MCOperand::CreateImm(0));
3979  // Vn
3980  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3981  // Vm
3982  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3983  // Vt
3984  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3985  // pred
3986  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3987  return true;
3988}
3989
3990/// Parse an ARM memory operand; returns false on success, or true after
3991/// emitting a diagnostic.  The first token must be a '[' when called.
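/// Illustrative forms handled below (not exhaustive):
///   [r0]                 @ no offset
///   [r0, :128]           @ alignment specifier
///   [r0, #-4]!           @ immediate offset with pre-index writeback
///   [r0, r1, lsl #2]     @ register offset with shift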
3992bool ARMAsmParser::
3993parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3994  SMLoc S, E;
3995  assert(Parser.getTok().is(AsmToken::LBrac) &&
3996         "Token is not a Left Bracket");
3997  S = Parser.getTok().getLoc();
3998  Parser.Lex(); // Eat left bracket token.
3999
4000  const AsmToken &BaseRegTok = Parser.getTok();
4001  int BaseRegNum = tryParseRegister();
4002  if (BaseRegNum == -1)
4003    return Error(BaseRegTok.getLoc(), "register expected");
4004
4005  // The next token must either be a comma or a closing bracket.
4006  const AsmToken &Tok = Parser.getTok();
4007  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4008    return Error(Tok.getLoc(), "malformed memory operand");
4009
4010  if (Tok.is(AsmToken::RBrac)) {
4011    E = Tok.getLoc();
4012    Parser.Lex(); // Eat right bracket token.
4013
4014    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4015                                             0, 0, false, S, E));
4016
4017    // If there's a pre-indexing writeback marker, '!', just add it as a token
4018    // operand. It's rather odd, but syntactically valid.
4019    if (Parser.getTok().is(AsmToken::Exclaim)) {
4020      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4021      Parser.Lex(); // Eat the '!'.
4022    }
4023
4024    return false;
4025  }
4026
4027  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4028  Parser.Lex(); // Eat the comma.
4029
4030  // If we have a ':', it's an alignment specifier.
4031  if (Parser.getTok().is(AsmToken::Colon)) {
4032    Parser.Lex(); // Eat the ':'.
4033    E = Parser.getTok().getLoc();
4034
4035    const MCExpr *Expr;
4036    if (getParser().ParseExpression(Expr))
4037     return true;
4038
4039    // The expression has to be a constant. Memory references with relocations
4040    // don't come through here, as they use the <label> forms of the relevant
4041    // instructions.
4042    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4043    if (!CE)
4044      return Error (E, "constant expression expected");
4045
4046    unsigned Align = 0;
4047    switch (CE->getValue()) {
4048    default:
4049      return Error(E,
4050                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4051    case 16:  Align = 2; break;
4052    case 32:  Align = 4; break;
4053    case 64:  Align = 8; break;
4054    case 128: Align = 16; break;
4055    case 256: Align = 32; break;
4056    }
4057
4058    // Now we should have the closing ']'
4059    E = Parser.getTok().getLoc();
4060    if (Parser.getTok().isNot(AsmToken::RBrac))
4061      return Error(E, "']' expected");
4062    Parser.Lex(); // Eat right bracket token.
4063
4064    // Don't worry about range checking the value here. That's handled by
4065    // the is*() predicates.
4066    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4067                                             ARM_AM::no_shift, 0, Align,
4068                                             false, S, E));
4069
4070    // If there's a pre-indexing writeback marker, '!', just add it as a token
4071    // operand.
4072    if (Parser.getTok().is(AsmToken::Exclaim)) {
4073      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4074      Parser.Lex(); // Eat the '!'.
4075    }
4076
4077    return false;
4078  }
4079
4080  // If we have a '#', it's an immediate offset, else assume it's a register
4081  // offset. Be friendly and also accept a plain integer (without a leading
4082  // hash) for gas compatibility.
4083  if (Parser.getTok().is(AsmToken::Hash) ||
4084      Parser.getTok().is(AsmToken::Dollar) ||
4085      Parser.getTok().is(AsmToken::Integer)) {
4086    if (Parser.getTok().isNot(AsmToken::Integer))
4087      Parser.Lex(); // Eat the '#'.
4088    E = Parser.getTok().getLoc();
4089
4090    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4091    const MCExpr *Offset;
4092    if (getParser().ParseExpression(Offset))
4093     return true;
4094
4095    // The expression has to be a constant. Memory references with relocations
4096    // don't come through here, as they use the <label> forms of the relevant
4097    // instructions.
4098    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4099    if (!CE)
4100      return Error (E, "constant expression expected");
4101
4102    // If the constant was #-0, represent it as INT32_MIN.
4103    int32_t Val = CE->getValue();
4104    if (isNegative && Val == 0)
4105      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4106
4107    // Now we should have the closing ']'
4108    E = Parser.getTok().getLoc();
4109    if (Parser.getTok().isNot(AsmToken::RBrac))
4110      return Error(E, "']' expected");
4111    Parser.Lex(); // Eat right bracket token.
4112
4113    // Don't worry about range checking the value here. That's handled by
4114    // the is*() predicates.
4115    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4116                                             ARM_AM::no_shift, 0, 0,
4117                                             false, S, E));
4118
4119    // If there's a pre-indexing writeback marker, '!', just add it as a token
4120    // operand.
4121    if (Parser.getTok().is(AsmToken::Exclaim)) {
4122      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4123      Parser.Lex(); // Eat the '!'.
4124    }
4125
4126    return false;
4127  }
4128
4129  // The register offset is optionally preceded by a '+' or '-'
4130  bool isNegative = false;
4131  if (Parser.getTok().is(AsmToken::Minus)) {
4132    isNegative = true;
4133    Parser.Lex(); // Eat the '-'.
4134  } else if (Parser.getTok().is(AsmToken::Plus)) {
4135    // Nothing to do.
4136    Parser.Lex(); // Eat the '+'.
4137  }
4138
4139  E = Parser.getTok().getLoc();
4140  int OffsetRegNum = tryParseRegister();
4141  if (OffsetRegNum == -1)
4142    return Error(E, "register expected");
4143
4144  // If there's a shift operator, handle it.
4145  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4146  unsigned ShiftImm = 0;
4147  if (Parser.getTok().is(AsmToken::Comma)) {
4148    Parser.Lex(); // Eat the ','.
4149    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4150      return true;
4151  }
4152
4153  // Now we should have the closing ']'
4154  E = Parser.getTok().getLoc();
4155  if (Parser.getTok().isNot(AsmToken::RBrac))
4156    return Error(E, "']' expected");
4157  Parser.Lex(); // Eat right bracket token.
4158
4159  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4160                                           ShiftType, ShiftImm, 0, isNegative,
4161                                           S, E));
4162
4163  // If there's a pre-indexing writeback marker, '!', just add it as a token
4164  // operand.
4165  if (Parser.getTok().is(AsmToken::Exclaim)) {
4166    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4167    Parser.Lex(); // Eat the '!'.
4168  }
4169
4170  return false;
4171}
4172
4173/// parseMemRegOffsetShift - one of these two:
4174///   ( lsl | lsr | asr | ror ) , # shift_amount
4175///   rrx
4176/// Returns false on success, true on error.
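/// Illustrative use: the "lsl #2" in "ldr r0, [r1, r2, lsl #2]".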
4177bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4178                                          unsigned &Amount) {
4179  SMLoc Loc = Parser.getTok().getLoc();
4180  const AsmToken &Tok = Parser.getTok();
4181  if (Tok.isNot(AsmToken::Identifier))
4182    return true;
4183  StringRef ShiftName = Tok.getString();
4184  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4185      ShiftName == "asl" || ShiftName == "ASL")
4186    St = ARM_AM::lsl;
4187  else if (ShiftName == "lsr" || ShiftName == "LSR")
4188    St = ARM_AM::lsr;
4189  else if (ShiftName == "asr" || ShiftName == "ASR")
4190    St = ARM_AM::asr;
4191  else if (ShiftName == "ror" || ShiftName == "ROR")
4192    St = ARM_AM::ror;
4193  else if (ShiftName == "rrx" || ShiftName == "RRX")
4194    St = ARM_AM::rrx;
4195  else
4196    return Error(Loc, "illegal shift operator");
4197  Parser.Lex(); // Eat shift type token.
4198
4199  // rrx stands alone.
4200  Amount = 0;
4201  if (St != ARM_AM::rrx) {
4202    Loc = Parser.getTok().getLoc();
4203    // A '#' and a shift amount.
4204    const AsmToken &HashTok = Parser.getTok();
4205    if (HashTok.isNot(AsmToken::Hash) &&
4206        HashTok.isNot(AsmToken::Dollar))
4207      return Error(HashTok.getLoc(), "'#' expected");
4208    Parser.Lex(); // Eat hash token.
4209
4210    const MCExpr *Expr;
4211    if (getParser().ParseExpression(Expr))
4212      return true;
4213    // Range check the immediate.
4214    // lsl, ror: 0 <= imm <= 31
4215    // lsr, asr: 0 <= imm <= 32
4216    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4217    if (!CE)
4218      return Error(Loc, "shift amount must be an immediate");
4219    int64_t Imm = CE->getValue();
4220    if (Imm < 0 ||
4221        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4222        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4223      return Error(Loc, "immediate shift value out of range");
4224    Amount = Imm;
4225  }
4226
4227  return false;
4228}
4229
4230/// parseFPImm - Parse a floating point immediate operand.
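/// Illustrative uses (see the disambiguation notes below):
///   vmov.f32 s0, #1.0
///   vmov.f64 d0, #-2.0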
4231ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4232parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4233  SMLoc S = Parser.getTok().getLoc();
4234
4235  if (Parser.getTok().isNot(AsmToken::Hash) &&
4236      Parser.getTok().isNot(AsmToken::Dollar))
4237    return MatchOperand_NoMatch;
4238
4239  // Disambiguate the VMOV forms that can accept an FP immediate.
4240  // vmov.f32 <sreg>, #imm
4241  // vmov.f64 <dreg>, #imm
4242  // vmov.f32 <dreg>, #imm  @ vector f32x2
4243  // vmov.f32 <qreg>, #imm  @ vector f32x4
4244  //
4245  // There are also the NEON VMOV instructions which expect an
4246  // integer constant. Make sure we don't try to parse an FPImm
4247  // for these:
4248  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4249  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4250  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4251                           TyOp->getToken() != ".f64"))
4252    return MatchOperand_NoMatch;
4253
4254  Parser.Lex(); // Eat the '#'.
4255
4256  // Handle negation, as that still comes through as a separate token.
4257  bool isNegative = false;
4258  if (Parser.getTok().is(AsmToken::Minus)) {
4259    isNegative = true;
4260    Parser.Lex();
4261  }
4262  const AsmToken &Tok = Parser.getTok();
4263  if (Tok.is(AsmToken::Real)) {
4264    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4265    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4266    // If we had a '-' in front, toggle the sign bit.
4267    IntVal ^= (uint64_t)isNegative << 63;
4268    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4269    Parser.Lex(); // Eat the token.
4270    if (Val == -1) {
4271      TokError("floating point value out of range");
4272      return MatchOperand_ParseFail;
4273    }
4274    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4275    return MatchOperand_Success;
4276  }
4277  if (Tok.is(AsmToken::Integer)) {
4278    int64_t Val = Tok.getIntVal();
4279    Parser.Lex(); // Eat the token.
4280    if (Val > 255 || Val < 0) {
4281      TokError("encoded floating point value out of range");
4282      return MatchOperand_ParseFail;
4283    }
4284    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4285    return MatchOperand_Success;
4286  }
4287
4288  TokError("invalid floating point immediate");
4289  return MatchOperand_ParseFail;
4290}
4291/// Parse an ARM instruction operand.  For now this parses the operand
4292/// regardless of the mnemonic.
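///
/// Illustrative operand forms dispatched below: "r0" (register), "{r0, r1}"
/// (register list), "[r0, #4]" (memory), "#42" or "$42" (immediate), and
/// ":lower16:foo" (prefixed expression).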
4293bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4294                                StringRef Mnemonic) {
4295  SMLoc S, E;
4296
4297  // Check if the current operand has a custom associated parser, if so, try to
4298  // custom parse the operand, or fallback to the general approach.
4299  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4300  if (ResTy == MatchOperand_Success)
4301    return false;
4302  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4303  // there was a match, but an error occurred, in which case, just return that
4304  // the operand parsing failed.
4305  if (ResTy == MatchOperand_ParseFail)
4306    return true;
4307
4308  switch (getLexer().getKind()) {
4309  default:
4310    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4311    return true;
4312  case AsmToken::Identifier: {
4313    if (!tryParseRegisterWithWriteBack(Operands))
4314      return false;
4315    int Res = tryParseShiftRegister(Operands);
4316    if (Res == 0) // success
4317      return false;
4318    else if (Res == -1) // irrecoverable error
4319      return true;
4320    // If this is VMRS, check for the apsr_nzcv operand.
4321    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4322      S = Parser.getTok().getLoc();
4323      Parser.Lex();
4324      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4325      return false;
4326    }
4327
4328    // Fall through for the Identifier case that is not a register or a
4329    // special name.
4330  }
4331  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4332  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4333  case AsmToken::String:  // quoted label names.
4334  case AsmToken::Dot: {   // . as a branch target
4335    // This was not a register so parse other operands that start with an
4336    // identifier (like labels) as expressions and create them as immediates.
4337    const MCExpr *IdVal;
4338    S = Parser.getTok().getLoc();
4339    if (getParser().ParseExpression(IdVal))
4340      return true;
4341    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4342    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4343    return false;
4344  }
4345  case AsmToken::LBrac:
4346    return parseMemory(Operands);
4347  case AsmToken::LCurly:
4348    return parseRegisterList(Operands);
4349  case AsmToken::Dollar:
4350  case AsmToken::Hash: {
4351    // #42 -> immediate.
4352    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4353    S = Parser.getTok().getLoc();
4354    Parser.Lex();
4355    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4356    const MCExpr *ImmVal;
4357    if (getParser().ParseExpression(ImmVal))
4358      return true;
4359    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4360    if (CE) {
4361      int32_t Val = CE->getValue();
4362      if (isNegative && Val == 0)
4363        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4364    }
4365    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4366    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4367    return false;
4368  }
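    // Note (informal): for "#-0" the expression itself evaluates to 0, so the
    // INT32_MIN sentinel above is presumably what lets later processing tell
    // a written "#-0" apart from "#0".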
4369  case AsmToken::Colon: {
4370    // ":lower16:" and ":upper16:" expression prefixes
4371    // FIXME: Check it's an expression prefix,
4372    // e.g. (FOO - :lower16:BAR) isn't legal.
4373    ARMMCExpr::VariantKind RefKind;
4374    if (parsePrefix(RefKind))
4375      return true;
4376
4377    const MCExpr *SubExprVal;
4378    if (getParser().ParseExpression(SubExprVal))
4379      return true;
4380
4381    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4382                                                   getContext());
4383    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4384    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4385    return false;
4386  }
4387  }
4388}
4389
4390// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4391//  :lower16: and :upper16:.
4392bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4393  RefKind = ARMMCExpr::VK_ARM_None;
4394
4395  // :lower16: and :upper16: modifiers
4396  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4397  Parser.Lex(); // Eat ':'
4398
4399  if (getLexer().isNot(AsmToken::Identifier)) {
4400    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4401    return true;
4402  }
4403
4404  StringRef IDVal = Parser.getTok().getIdentifier();
4405  if (IDVal == "lower16") {
4406    RefKind = ARMMCExpr::VK_ARM_LO16;
4407  } else if (IDVal == "upper16") {
4408    RefKind = ARMMCExpr::VK_ARM_HI16;
4409  } else {
4410    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4411    return true;
4412  }
4413  Parser.Lex();
4414
4415  if (getLexer().isNot(AsmToken::Colon)) {
4416    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4417    return true;
4418  }
4419  Parser.Lex(); // Eat the last ':'
4420  return false;
4421}
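// Usage sketch (illustrative): for "movw r0, :lower16:some_label" the operand
// parser reaches its Colon case with the lexer sitting on the first ':';
// parsePrefix() consumes ":lower16:" and reports VK_ARM_LO16, and the caller
// wraps the remaining expression "some_label" (a placeholder name here) in an
// ARMMCExpr.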
4422
4423/// \brief Given a mnemonic, split out possible predication code and carry
4424/// setting letters to form a canonical mnemonic and flags.
4425//
4426// FIXME: Would be nice to autogen this.
4427// FIXME: This is a bit of a maze of special cases.
4428StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4429                                      unsigned &PredicationCode,
4430                                      bool &CarrySetting,
4431                                      unsigned &ProcessorIMod,
4432                                      StringRef &ITMask) {
4433  PredicationCode = ARMCC::AL;
4434  CarrySetting = false;
4435  ProcessorIMod = 0;
4436
4437  // Ignore some mnemonics we know aren't predicated forms.
4438  //
4439  // FIXME: Would be nice to autogen this.
4440  if ((Mnemonic == "movs" && isThumb()) ||
4441      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4442      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4443      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4444      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4445      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4446      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4447      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4448      Mnemonic == "fmuls")
4449    return Mnemonic;
4450
4451  // First, split out any predication code. Ignore mnemonics we know aren't
4452  // predicated but do have a carry-set and so weren't caught above.
4453  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4454      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4455      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4456      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4457    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4458      .Case("eq", ARMCC::EQ)
4459      .Case("ne", ARMCC::NE)
4460      .Case("hs", ARMCC::HS)
4461      .Case("cs", ARMCC::HS)
4462      .Case("lo", ARMCC::LO)
4463      .Case("cc", ARMCC::LO)
4464      .Case("mi", ARMCC::MI)
4465      .Case("pl", ARMCC::PL)
4466      .Case("vs", ARMCC::VS)
4467      .Case("vc", ARMCC::VC)
4468      .Case("hi", ARMCC::HI)
4469      .Case("ls", ARMCC::LS)
4470      .Case("ge", ARMCC::GE)
4471      .Case("lt", ARMCC::LT)
4472      .Case("gt", ARMCC::GT)
4473      .Case("le", ARMCC::LE)
4474      .Case("al", ARMCC::AL)
4475      .Default(~0U);
4476    if (CC != ~0U) {
4477      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4478      PredicationCode = CC;
4479    }
4480  }
4481
4482  // Next, determine if we have a carry setting bit. We explicitly ignore all
4483  // the instructions we know end in 's'.
4484  if (Mnemonic.endswith("s") &&
4485      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4486        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4487        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4488        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4489        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4490        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4491        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4492        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
4493        (Mnemonic == "movs" && isThumb()))) {
4494    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4495    CarrySetting = true;
4496  }
4497
4498  // The "cps" instruction can have an interrupt mode operand which is glued
4499  // into the mnemonic. If so, split it off and parse the imod operand.
4500  if (Mnemonic.startswith("cps")) {
4501    // Split out any imod code.
4502    unsigned IMod =
4503      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4504      .Case("ie", ARM_PROC::IE)
4505      .Case("id", ARM_PROC::ID)
4506      .Default(~0U);
4507    if (IMod != ~0U) {
4508      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4509      ProcessorIMod = IMod;
4510    }
4511  }
4512
4513  // The "it" instruction has the condition mask on the end of the mnemonic.
4514  if (Mnemonic.startswith("it")) {
4515    ITMask = Mnemonic.slice(2, Mnemonic.size());
4516    Mnemonic = Mnemonic.slice(0, 2);
4517  }
4518
4519  return Mnemonic;
4520}
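// Rough worked examples: "addseq" loses its trailing "eq" above
// (PredicationCode = ARMCC::EQ), then its trailing 's' (CarrySetting = true),
// and comes back as the canonical mnemonic "add". "cpsie" splits into "cps"
// with ProcessorIMod = ARM_PROC::IE, and "ittet" into "it" with ITMask "tet".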
4521
4522/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4523/// inclusion of carry set or predication code operands.
4524//
4525// FIXME: It would be nice to autogen this.
4526void ARMAsmParser::
4527getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4528                      bool &CanAcceptPredicationCode) {
4529  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4530      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4531      Mnemonic == "add" || Mnemonic == "adc" ||
4532      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4533      Mnemonic == "orr" || Mnemonic == "mvn" ||
4534      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4535      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4536      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4537                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4538                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4539    CanAcceptCarrySet = true;
4540  } else
4541    CanAcceptCarrySet = false;
4542
4543  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4544      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4545      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4546      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4547      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4548      (Mnemonic == "clrex" && !isThumb()) ||
4549      (Mnemonic == "nop" && isThumbOne()) ||
4550      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4551        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4552        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4553      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4554       !isThumb()) ||
4555      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4556    CanAcceptPredicationCode = false;
4557  } else
4558    CanAcceptPredicationCode = true;
4559
4560  if (isThumb()) {
4561    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4562        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4563      CanAcceptPredicationCode = false;
4564  }
4565}
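// For example: for "add" both flags come back true, so ParseInstruction()
// below creates both a CCOut and a condition-code operand; for "cbz" neither
// flag-setting nor predication is ever legal, so only the bare mnemonic token
// (plus the explicit operands) is emitted.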
4566
4567bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4568                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4569  // FIXME: This is all horribly hacky. We really need a better way to deal
4570  // with optional operands like this in the matcher table.
4571
4572  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4573  // another does not. Specifically, the MOVW instruction does not. So we
4574  // special case it here and remove the defaulted (non-setting) cc_out
4575  // operand if that's the instruction we're trying to match.
4576  //
4577  // We do this as post-processing of the explicit operands rather than just
4578  // conditionally adding the cc_out in the first place because we need
4579  // to check the type of the parsed immediate operand.
4580  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4581      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4582      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4583      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4584    return true;
4585
4586  // Register-register 'add' for thumb does not have a cc_out operand
4587  // when there are only two register operands.
4588  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4589      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4590      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4591      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4592    return true;
4593  // Register-register 'add' for thumb does not have a cc_out operand
4594  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4595  // have to check the immediate range here since Thumb2 has a variant
4596  // that can handle a different range and has a cc_out operand.
4597  if (((isThumb() && Mnemonic == "add") ||
4598       (isThumbTwo() && Mnemonic == "sub")) &&
4599      Operands.size() == 6 &&
4600      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4601      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4602      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4603      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4604      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4605       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4606    return true;
4607  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4608  // imm0_4095 variant. That's the least-preferred variant when
4609  // selecting via the generic "add" mnemonic, so to know that we
4610  // should remove the cc_out operand, we have to explicitly check that
4611  // it's not one of the other variants. Ugh.
4612  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4613      Operands.size() == 6 &&
4614      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4615      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4616      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4617    // Nest conditions rather than one big 'if' statement for readability.
4618    //
4619    // If either register is a high reg, it's either one of the SP
4620    // variants (handled above) or a 32-bit encoding, so we just
4621    // check against T3.
4622    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4623         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4624        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4625      return false;
4626    // If both registers are low, we're in an IT block, and the immediate is
4627    // in range, we should use encoding T1 instead, which has a cc_out.
4628    if (inITBlock() &&
4629        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4630        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4631        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4632      return false;
4633
4634    // Otherwise, we use encoding T4, which does not have a cc_out
4635    // operand.
4636    return true;
4637  }
4638
4639  // The thumb2 multiply instruction doesn't have a CCOut register, so
4640  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4641  // use the 16-bit encoding or not.
4642  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4643      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4644      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4645      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4646      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4647      // If the registers aren't low regs, the destination reg isn't the
4648      // same as one of the source regs, or the cc_out operand is zero
4649      // outside of an IT block, we have to use the 32-bit encoding, so
4650      // remove the cc_out operand.
4651      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4652       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4653       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4654       !inITBlock() ||
4655       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4656        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4657        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4658        static_cast<ARMOperand*>(Operands[4])->getReg())))
4659    return true;
4660
4661  // Also check the 'mul' syntax variant that doesn't specify an explicit
4662  // destination register.
4663  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4664      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4665      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4666      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4667      // If the registers aren't low regs or the cc_out operand is zero
4668      // outside of an IT block, we have to use the 32-bit encoding, so
4669      // remove the cc_out operand.
4670      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4671       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4672       !inITBlock()))
4673    return true;
4674
4677  // Register-register 'add/sub' for thumb does not have a cc_out operand
4678  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4679  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4680  // right, this will result in better diagnostics (which operand is off)
4681  // anyway.
4682  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4683      (Operands.size() == 5 || Operands.size() == 6) &&
4684      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4685      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4686      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4687    return true;
4688
4689  return false;
4690}
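// For instance: in ARM mode "mov r0, #0x1234" parses with a defaulted (zero)
// cc_out operand, but 0x1234 is not a valid so_imm while it does fit the
// 0-65535 MOVW range, so the first check above fires and the caller drops the
// cc_out so the operand list lines up with MOVW.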
4691
4692static bool isDataTypeToken(StringRef Tok) {
4693  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4694    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4695    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4696    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4697    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4698    Tok == ".f" || Tok == ".d";
4699}
4700
4701// FIXME: This bit should probably be handled via an explicit match class
4702// in the .td files that matches the suffix instead of having it be
4703// a literal string token the way it is now.
4704static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4705  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4706}
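// e.g. "vldm.f64 r0!, {d0-d3}" carries a ".f64" suffix that is accepted but
// simply dropped by ParseInstruction(), whereas for most NEON mnemonics the
// datatype suffix is kept as a token operand for the matcher to check.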
4707
4708static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4709/// Parse an ARM instruction mnemonic followed by its operands.
4710bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4711                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4712  // Apply mnemonic aliases before doing anything else, as the destination
4713  // mnemonic may include suffixes and we want to handle them normally.
4714  // The generic tblgen'erated code does this later, at the start of
4715  // MatchInstructionImpl(), but that's too late for aliases that include
4716  // any sort of suffix.
4717  unsigned AvailableFeatures = getAvailableFeatures();
4718  applyMnemonicAliases(Name, AvailableFeatures);
4719
4720  // First check for the ARM-specific .req directive.
4721  if (Parser.getTok().is(AsmToken::Identifier) &&
4722      Parser.getTok().getIdentifier() == ".req") {
4723    parseDirectiveReq(Name, NameLoc);
4724    // We always return 'error' for this, as we're done with this
4725    // statement and don't need to match the instruction.
4726    return true;
4727  }
4728
4729  // Create the leading tokens for the mnemonic, split by '.' characters.
4730  size_t Start = 0, Next = Name.find('.');
4731  StringRef Mnemonic = Name.slice(Start, Next);
4732
4733  // Split out the predication code and carry setting flag from the mnemonic.
4734  unsigned PredicationCode;
4735  unsigned ProcessorIMod;
4736  bool CarrySetting;
4737  StringRef ITMask;
4738  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4739                           ProcessorIMod, ITMask);
4740
4741  // In Thumb1, only the branch (B) instruction can be predicated.
4742  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4743    Parser.EatToEndOfStatement();
4744    return Error(NameLoc, "conditional execution not supported in Thumb1");
4745  }
4746
4747  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4748
4749  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4750  // is the mask as it will be for the IT encoding if the conditional
4751  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4752  // where the conditional bit0 is zero, the instruction post-processing
4753  // will adjust the mask accordingly.
4754  if (Mnemonic == "it") {
4755    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4756    if (ITMask.size() > 3) {
4757      Parser.EatToEndOfStatement();
4758      return Error(Loc, "too many conditions on IT instruction");
4759    }
4760    unsigned Mask = 8;
4761    for (unsigned i = ITMask.size(); i != 0; --i) {
4762      char pos = ITMask[i - 1];
4763      if (pos != 't' && pos != 'e') {
4764        Parser.EatToEndOfStatement();
4765        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4766      }
4767      Mask >>= 1;
4768      if (ITMask[i - 1] == 't')
4769        Mask |= 8;
4770    }
4771    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4772  }
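  // Rough worked example: for "ittet" the ITMask string is "tet" and the loop
  // above yields Mask = 0b1011 -- bits 3..1 encode 't','e','t' (with 't' as
  // '1') and the low 1 bit is the terminator expected by the IT encoding.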
4773
4774  // FIXME: This is all a pretty gross hack. We should automatically handle
4775  // optional operands like this via tblgen.
4776
4777  // Next, add the CCOut and ConditionCode operands, if needed.
4778  //
4779  // For mnemonics which can ever incorporate a carry setting bit or predication
4780  // code, our matching model involves us always generating CCOut and
4781  // ConditionCode operands to match the mnemonic "as written" and then we let
4782  // the matcher deal with finding the right instruction or generating an
4783  // appropriate error.
4784  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4785  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4786
4787  // If we had a carry-set on an instruction that can't do that, issue an
4788  // error.
4789  if (!CanAcceptCarrySet && CarrySetting) {
4790    Parser.EatToEndOfStatement();
4791    return Error(NameLoc, "instruction '" + Mnemonic +
4792                 "' can not set flags, but 's' suffix specified");
4793  }
4794  // If we had a predication code on an instruction that can't do that, issue an
4795  // error.
4796  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4797    Parser.EatToEndOfStatement();
4798    return Error(NameLoc, "instruction '" + Mnemonic +
4799                 "' is not predicable, but condition code specified");
4800  }
4801
4802  // Add the carry setting operand, if necessary.
4803  if (CanAcceptCarrySet) {
4804    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4805    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4806                                               Loc));
4807  }
4808
4809  // Add the predication code operand, if necessary.
4810  if (CanAcceptPredicationCode) {
4811    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4812                                      CarrySetting);
4813    Operands.push_back(ARMOperand::CreateCondCode(
4814                         ARMCC::CondCodes(PredicationCode), Loc));
4815  }
4816
4817  // Add the processor imod operand, if necessary.
4818  if (ProcessorIMod) {
4819    Operands.push_back(ARMOperand::CreateImm(
4820          MCConstantExpr::Create(ProcessorIMod, getContext()),
4821                                 NameLoc, NameLoc));
4822  }
4823
4824  // Add the remaining tokens in the mnemonic.
4825  while (Next != StringRef::npos) {
4826    Start = Next;
4827    Next = Name.find('.', Start + 1);
4828    StringRef ExtraToken = Name.slice(Start, Next);
4829
4830    // Some NEON instructions have an optional datatype suffix that is
4831    // completely ignored. Check for that.
4832    if (isDataTypeToken(ExtraToken) &&
4833        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4834      continue;
4835
4836    if (ExtraToken != ".n") {
4837      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4838      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4839    }
4840  }
4841
4842  // Read the remaining operands.
4843  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4844    // Read the first operand.
4845    if (parseOperand(Operands, Mnemonic)) {
4846      Parser.EatToEndOfStatement();
4847      return true;
4848    }
4849
4850    while (getLexer().is(AsmToken::Comma)) {
4851      Parser.Lex();  // Eat the comma.
4852
4853      // Parse and remember the operand.
4854      if (parseOperand(Operands, Mnemonic)) {
4855        Parser.EatToEndOfStatement();
4856        return true;
4857      }
4858    }
4859  }
4860
4861  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4862    SMLoc Loc = getLexer().getLoc();
4863    Parser.EatToEndOfStatement();
4864    return Error(Loc, "unexpected token in argument list");
4865  }
4866
4867  Parser.Lex(); // Consume the EndOfStatement
4868
4869  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4870  // do and don't have a cc_out optional-def operand. With some spot-checks
4871  // of the operand list, we can figure out which variant we're trying to
4872  // parse and adjust accordingly before actually matching. We shouldn't ever
4873  // try to remove a cc_out operand that was explicitly set on the
4874  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4875  // table-driven matcher doesn't fit well with the ARM instruction set.
4876  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4877    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4878    Operands.erase(Operands.begin() + 1);
4879    delete Op;
4880  }
4881
4882  // ARM mode 'blx' needs special handling, as the register operand version
4883  // is predicable, but the label operand version is not. So, we can't rely
4884  // on the Mnemonic based checking to correctly figure out when to put
4885  // a k_CondCode operand in the list. If we're trying to match the label
4886  // version, remove the k_CondCode operand here.
4887  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4888      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4889    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4890    Operands.erase(Operands.begin() + 1);
4891    delete Op;
4892  }
4893
4894  // The vector-compare-to-zero instructions have a literal token "#0" at
4895  // the end that comes to here as an immediate operand. Convert it to a
4896  // token to play nicely with the matcher.
4897  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4898      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4899      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4900    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4901    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4902    if (CE && CE->getValue() == 0) {
4903      Operands.erase(Operands.begin() + 5);
4904      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4905      delete Op;
4906    }
4907  }
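  // e.g. "vceq.i32 d0, d1, #0" arrives here with a constant-zero immediate as
  // its last operand; rewriting it as the literal token "#0" lets the matcher
  // pick the compare-against-zero form.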
4908  // VCMP{E} does the same thing, but with a different operand count.
4909  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4910      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4911    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4912    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4913    if (CE && CE->getValue() == 0) {
4914      Operands.erase(Operands.begin() + 4);
4915      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4916      delete Op;
4917    }
4918  }
4919  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4920  // end. Convert it to a token here. Take care not to convert those
4921  // that should hit the Thumb2 encoding.
4922  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4923      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4924      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4925      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4926    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4927    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4928    if (CE && CE->getValue() == 0 &&
4929        (isThumbOne() ||
4930         // The cc_out operand matches the IT block.
4931         ((inITBlock() != CarrySetting) &&
4932         // Neither register operand is a high register.
4933         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4934          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4935      Operands.erase(Operands.begin() + 5);
4936      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4937      delete Op;
4938    }
4939  }
4940
4941  return false;
4942}
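// End-to-end sketch of the routine above: for "addseq r0, r1, #4" the operand
// list built here is roughly: token "add", CCOut(CPSR), CondCode(eq), reg r0,
// reg r1, imm 4 -- which is the shape the generated matcher expects.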
4943
4944// Validate context-sensitive operand constraints.
4945
4946// Return 'true' if the register list contains non-low GPR registers
4947// (other than the optional HiReg), 'false' otherwise. If Reg appears in
4948// the register list, set 'containsReg' to true.
4949static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4950                                 unsigned HiReg, bool &containsReg) {
4951  containsReg = false;
4952  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4953    unsigned OpReg = Inst.getOperand(i).getReg();
4954    if (OpReg == Reg)
4955      containsReg = true;
4956    // Anything other than a low register isn't legal here.
4957    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4958      return true;
4959  }
4960  return false;
4961}
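// Usage sketch: for a Thumb1 "ldm r0, {r1, r2, r8}" with OpNo pointing at the
// start of the register list, the r8 operand makes this return true (which
// the callers turn into a range diagnostic), and containsReg reports whether
// the base register itself appeared in the list.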
4962
4963// Check if the specified register is in the register list of the inst,
4964// starting at the indicated operand number.
4965static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4966  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4967    unsigned OpReg = Inst.getOperand(i).getReg();
4968    if (OpReg == Reg)
4969      return true;
4970  }
4971  return false;
4972}
4973
4974// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4975// the ARMInsts array) instead. Getting that here requires awkward
4976// API changes, though. Better way?
4977namespace llvm {
4978extern const MCInstrDesc ARMInsts[];
4979}
4980static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4981  return ARMInsts[Opcode];
4982}
4983
4984// FIXME: We would really like to be able to tablegen'erate this.
4985bool ARMAsmParser::
4986validateInstruction(MCInst &Inst,
4987                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4988  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4989  SMLoc Loc = Operands[0]->getStartLoc();
4990  // Check the IT block state first.
4991  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4992  // being allowed in IT blocks, but not being predicable.  It just always
4993  // executes.
4994  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
4995    unsigned bit = 1;
4996    if (ITState.FirstCond)
4997      ITState.FirstCond = false;
4998    else
4999      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5000    // The instruction must be predicable.
5001    if (!MCID.isPredicable())
5002      return Error(Loc, "instructions in IT block must be predicable");
5003    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5004    unsigned ITCond = bit ? ITState.Cond :
5005      ARMCC::getOppositeCondition(ITState.Cond);
5006    if (Cond != ITCond) {
5007      // Find the condition code Operand to get its SMLoc information.
5008      SMLoc CondLoc;
5009      for (unsigned i = 1; i < Operands.size(); ++i)
5010        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5011          CondLoc = Operands[i]->getStartLoc();
5012      return Error(CondLoc, "incorrect condition in IT block; got '" +
5013                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5014                   "', but expected '" +
5015                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5016    }
5017  // Check for non-'al' condition codes outside of the IT block.
5018  } else if (isThumbTwo() && MCID.isPredicable() &&
5019             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5020             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5021             Inst.getOpcode() != ARM::t2B)
5022    return Error(Loc, "predicated instructions must be in IT block");
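  // e.g. inside "it eq" a following "addne r0, r1" is rejected just above
  // with "incorrect condition in IT block", while a lone "addeq r0, r1" in
  // Thumb2 outside any IT block hits "predicated instructions must be in IT
  // block".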
5023
5024  switch (Inst.getOpcode()) {
5025  case ARM::LDRD:
5026  case ARM::LDRD_PRE:
5027  case ARM::LDRD_POST:
5028  case ARM::LDREXD: {
5029    // Rt2 must be Rt + 1.
5030    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5031    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5032    if (Rt2 != Rt + 1)
5033      return Error(Operands[3]->getStartLoc(),
5034                   "destination operands must be sequential");
5035    return false;
5036  }
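  // e.g. "ldrd r0, r2, [r3]" is rejected by the check above because the
  // second destination register must be r1 when the first is r0.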
5037  case ARM::STRD: {
5038    // Rt2 must be Rt + 1.
5039    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5040    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5041    if (Rt2 != Rt + 1)
5042      return Error(Operands[3]->getStartLoc(),
5043                   "source operands must be sequential");
5044    return false;
5045  }
5046  case ARM::STRD_PRE:
5047  case ARM::STRD_POST:
5048  case ARM::STREXD: {
5049    // Rt2 must be Rt + 1.
5050    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5051    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5052    if (Rt2 != Rt + 1)
5053      return Error(Operands[3]->getStartLoc(),
5054                   "source operands must be sequential");
5055    return false;
5056  }
5057  case ARM::SBFX:
5058  case ARM::UBFX: {
5059    // width must be in range [1, 32-lsb]
5060    unsigned lsb = Inst.getOperand(2).getImm();
5061    unsigned widthm1 = Inst.getOperand(3).getImm();
5062    if (widthm1 >= 32 - lsb)
5063      return Error(Operands[5]->getStartLoc(),
5064                   "bitfield width must be in range [1,32-lsb]");
5065    return false;
5066  }
5067  case ARM::tLDMIA: {
5068    // If we're parsing Thumb2, the .w variant is available and handles
5069    // most cases that are normally illegal for a Thumb1 LDM
5070    // instruction. We'll make the transformation in processInstruction()
5071    // if necessary.
5072    //
5073    // Thumb LDM instructions are writeback iff the base register is not
5074    // in the register list.
5075    unsigned Rn = Inst.getOperand(0).getReg();
5076    bool hasWritebackToken =
5077      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5078       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5079    bool listContainsBase;
5080    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5081      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5082                   "registers must be in range r0-r7");
5083    // If we should have writeback, then there should be a '!' token.
5084    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5085      return Error(Operands[2]->getStartLoc(),
5086                   "writeback operator '!' expected");
5087    // If we should not have writeback, there must not be a '!'. This is
5088    // true even for the 32-bit wide encodings.
5089    if (listContainsBase && hasWritebackToken)
5090      return Error(Operands[3]->getStartLoc(),
5091                   "writeback operator '!' not allowed when base register "
5092                   "in register list");
5093
5094    break;
5095  }
5096  case ARM::t2LDMIA_UPD: {
5097    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5098      return Error(Operands[4]->getStartLoc(),
5099                   "writeback operator '!' not allowed when base register "
5100                   "in register list");
5101    break;
5102  }
5103  // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
5104  // so only issue a diagnostic for Thumb1. The instructions will be
5105  // switched to the t2 encodings in processInstruction() if necessary.
5106  case ARM::tPOP: {
5107    bool listContainsBase;
5108    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5109        !isThumbTwo())
5110      return Error(Operands[2]->getStartLoc(),
5111                   "registers must be in range r0-r7 or pc");
5112    break;
5113  }
5114  case ARM::tPUSH: {
5115    bool listContainsBase;
5116    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5117        !isThumbTwo())
5118      return Error(Operands[2]->getStartLoc(),
5119                   "registers must be in range r0-r7 or lr");
5120    break;
5121  }
5122  case ARM::tSTMIA_UPD: {
5123    bool listContainsBase;
5124    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5125      return Error(Operands[4]->getStartLoc(),
5126                   "registers must be in range r0-r7");
5127    break;
5128  }
5129  }
5130
5131  return false;
5132}
5133
5134static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
5135  switch(Opc) {
5136  default: assert(0 && "unexpected opcode!");
5137  // VST1LN
5138  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
5139  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5140  case ARM::VST1LNdWB_fixed_Asm_U8:
5141    Spacing = 1;
5142    return ARM::VST1LNd8_UPD;
5143  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
5144  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
5145  case ARM::VST1LNdWB_fixed_Asm_U16:
5146    Spacing = 1;
5147    return ARM::VST1LNd16_UPD;
5148  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
5149  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5150  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
5151    Spacing = 1;
5152    return ARM::VST1LNd32_UPD;
5153  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
5154  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5155  case ARM::VST1LNdWB_register_Asm_U8:
5156    Spacing = 1;
5157    return ARM::VST1LNd8_UPD;
5158  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
5159  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
5160  case ARM::VST1LNdWB_register_Asm_U16:
5161    Spacing = 1;
5162    return ARM::VST1LNd16_UPD;
5163  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
5164  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5165  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
5166    Spacing = 1;
5167    return ARM::VST1LNd32_UPD;
5168  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
5169  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
5170  case ARM::VST1LNdAsm_U8:
5171    Spacing = 1;
5172    return ARM::VST1LNd8;
5173  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
5174  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5175  case ARM::VST1LNdAsm_U16:
5176    Spacing = 1;
5177    return ARM::VST1LNd16;
5178  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
5179  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
5180  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
5181    Spacing = 1;
5182    return ARM::VST1LNd32;
5183
5184  // VST2LN
5185  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
5186  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5187  case ARM::VST2LNdWB_fixed_Asm_U8:
5188    Spacing = 1;
5189    return ARM::VST2LNd8_UPD;
5190  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
5191  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
5192  case ARM::VST2LNdWB_fixed_Asm_U16:
5193    Spacing = 1;
5194    return ARM::VST2LNd16_UPD;
5195  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5196  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5197  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5198    Spacing = 1;
5199    return ARM::VST2LNd32_UPD;
5200  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5201  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5202  case ARM::VST2LNqWB_fixed_Asm_U16:
5203    Spacing = 2;
5204    return ARM::VST2LNq16_UPD;
5205  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
5206  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
5207  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
5208    Spacing = 2;
5209    return ARM::VST2LNq32_UPD;
5210
5211  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
5212  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5213  case ARM::VST2LNdWB_register_Asm_U8:
5214    Spacing = 1;
5215    return ARM::VST2LNd8_UPD;
5216  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
5217  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
5218  case ARM::VST2LNdWB_register_Asm_U16:
5219    Spacing = 1;
5220    return ARM::VST2LNd16_UPD;
5221  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5222  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5223  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5224    Spacing = 1;
5225    return ARM::VST2LNd32_UPD;
5226  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5227  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5228  case ARM::VST2LNqWB_register_Asm_U16:
5229    Spacing = 2;
5230    return ARM::VST2LNq16_UPD;
5231  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
5232  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
5233  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
5234    Spacing = 2;
5235    return ARM::VST2LNq32_UPD;
5236
5237  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
5238  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
5239  case ARM::VST2LNdAsm_U8:
5240    Spacing = 1;
5241    return ARM::VST2LNd8;
5242  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
5243  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5244  case ARM::VST2LNdAsm_U16:
5245    Spacing = 1;
5246    return ARM::VST2LNd16;
5247  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5248  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
5249  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
5250    Spacing = 1;
5251    return ARM::VST2LNd32;
5252  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5253  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
5254  case ARM::VST2LNqAsm_U16:
5255    Spacing = 2;
5256    return ARM::VST2LNq16;
5257  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
5258  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
5259  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
5260    Spacing = 2;
5261    return ARM::VST2LNq32;
5262  }
5263}
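// For instance, the assembly-only pseudo ARM::VST1LNdAsm_32 (written as
// "vst1.32 {d0[1]}, [r0]") maps to the real ARM::VST1LNd32 encoding with
// Spacing = 1, while the q-register VST2LN forms report Spacing = 2 because
// their two lanes come from D registers two apart.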
5264
5265static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
5266  switch(Opc) {
5267  default: assert(0 && "unexpected opcode!");
5268  // VLD1LN
5269  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5270  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5271  case ARM::VLD1LNdWB_fixed_Asm_U8:
5272    Spacing = 1;
5273    return ARM::VLD1LNd8_UPD;
5274  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5275  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5276  case ARM::VLD1LNdWB_fixed_Asm_U16:
5277    Spacing = 1;
5278    return ARM::VLD1LNd16_UPD;
5279  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5280  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5281  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5282    Spacing = 1;
5283    return ARM::VLD1LNd32_UPD;
5284  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5285  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5286  case ARM::VLD1LNdWB_register_Asm_U8:
5287    Spacing = 1;
5288    return ARM::VLD1LNd8_UPD;
5289  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5290  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5291  case ARM::VLD1LNdWB_register_Asm_U16:
5292    Spacing = 1;
5293    return ARM::VLD1LNd16_UPD;
5294  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5295  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5296  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5297    Spacing = 1;
5298    return ARM::VLD1LNd32_UPD;
5299  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5300  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5301  case ARM::VLD1LNdAsm_U8:
5302    Spacing = 1;
5303    return ARM::VLD1LNd8;
5304  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5305  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5306  case ARM::VLD1LNdAsm_U16:
5307    Spacing = 1;
5308    return ARM::VLD1LNd16;
5309  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5310  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5311  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5312    Spacing = 1;
5313    return ARM::VLD1LNd32;
5314
5315  // VLD2LN
5316  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5317  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5318  case ARM::VLD2LNdWB_fixed_Asm_U8:
5319    Spacing = 1;
5320    return ARM::VLD2LNd8_UPD;
5321  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5322  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5323  case ARM::VLD2LNdWB_fixed_Asm_U16:
5324    Spacing = 1;
5325    return ARM::VLD2LNd16_UPD;
5326  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5327  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5328  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5329    Spacing = 1;
5330    return ARM::VLD2LNd32_UPD;
5331  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5332  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5333  case ARM::VLD2LNqWB_fixed_Asm_U16:
5334    Spacing = 1;
5335    return ARM::VLD2LNq16_UPD;
5336  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
5337  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
5338  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
5339    Spacing = 2;
5340    return ARM::VLD2LNq32_UPD;
5341  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5342  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5343  case ARM::VLD2LNdWB_register_Asm_U8:
5344    Spacing = 1;
5345    return ARM::VLD2LNd8_UPD;
5346  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5347  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5348  case ARM::VLD2LNdWB_register_Asm_U16:
5349    Spacing = 1;
5350    return ARM::VLD2LNd16_UPD;
5351  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5352  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5353  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5354    Spacing = 1;
5355    return ARM::VLD2LNd32_UPD;
5356  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5357  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5358  case ARM::VLD2LNqWB_register_Asm_U16:
5359    Spacing = 2;
5360    return ARM::VLD2LNq16_UPD;
5361  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
5362  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
5363  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
5364    Spacing = 2;
5365    return ARM::VLD2LNq32_UPD;
5366  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5367  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5368  case ARM::VLD2LNdAsm_U8:
5369    Spacing = 1;
5370    return ARM::VLD2LNd8;
5371  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5372  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5373  case ARM::VLD2LNdAsm_U16:
5374    Spacing = 1;
5375    return ARM::VLD2LNd16;
5376  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5377  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5378  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5379    Spacing = 1;
5380    return ARM::VLD2LNd32;
5381  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5382  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
5383  case ARM::VLD2LNqAsm_U16:
5384    Spacing = 2;
5385    return ARM::VLD2LNq16;
5386  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
5387  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
5388  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
5389    Spacing = 2;
5390    return ARM::VLD2LNq32;
5391  }
5392}
5393
5394bool ARMAsmParser::
5395processInstruction(MCInst &Inst,
5396                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5397  switch (Inst.getOpcode()) {
5398  // Handle NEON VST complex aliases.
5399  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5400  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5401  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5402  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5403  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5404  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5405  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5406  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5407    MCInst TmpInst;
5408    // Shuffle the operands around so the lane index operand is in the
5409    // right place.
5410    unsigned Spacing;
5411    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5412    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5413    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5414    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5415    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5416    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5417    TmpInst.addOperand(Inst.getOperand(1)); // lane
5418    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5419    TmpInst.addOperand(Inst.getOperand(6));
5420    Inst = TmpInst;
5421    return true;
5422  }
5423
5424  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
5425  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
5426  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
5427  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5428  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5429  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5430  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5431  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5432  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5433  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5434  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
5435  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
5436  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
5437  case ARM::VST2LNqWB_register_Asm_U32: {
5438    MCInst TmpInst;
5439    // Shuffle the operands around so the lane index operand is in the
5440    // right place.
5441    unsigned Spacing;
5442    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5443    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5444    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5445    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5446    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5447    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5448    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5449                                            Spacing));
5450    TmpInst.addOperand(Inst.getOperand(1)); // lane
5451    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5452    TmpInst.addOperand(Inst.getOperand(6));
5453    Inst = TmpInst;
5454    return true;
5455  }
5456  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5457  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5458  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5459  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5460  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5461  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5462  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5463  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5464    MCInst TmpInst;
5465    // Shuffle the operands around so the lane index operand is in the
5466    // right place.
5467    unsigned Spacing;
5468    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5469    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5470    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5471    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5472    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5473    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5474    TmpInst.addOperand(Inst.getOperand(1)); // lane
5475    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5476    TmpInst.addOperand(Inst.getOperand(5));
5477    Inst = TmpInst;
5478    return true;
5479  }
5480
5481  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
5482  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
5483  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
5484  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5485  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5486  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5487  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5488  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5489  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5490  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5491  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
5492  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
5493  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
5494  case ARM::VST2LNqWB_fixed_Asm_U32: {
5495    MCInst TmpInst;
5496    // Shuffle the operands around so the lane index operand is in the
5497    // right place.
5498    unsigned Spacing;
5499    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5500    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5501    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5502    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5503    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5504    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5505    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5506                                            Spacing));
5507    TmpInst.addOperand(Inst.getOperand(1)); // lane
5508    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5509    TmpInst.addOperand(Inst.getOperand(5));
5510    Inst = TmpInst;
5511    return true;
5512  }
5513  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5514  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5515  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5516  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5517  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5518  case ARM::VST1LNdAsm_U32: {
5519    MCInst TmpInst;
5520    // Shuffle the operands around so the lane index operand is in the
5521    // right place.
5522    unsigned Spacing;
5523    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5524    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5525    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5526    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5527    TmpInst.addOperand(Inst.getOperand(1)); // lane
5528    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5529    TmpInst.addOperand(Inst.getOperand(5));
5530    Inst = TmpInst;
5531    return true;
5532  }
5533
5534  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
5535  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
5536  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5537  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5538  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5539  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5540  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
5541  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
5542  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
5543    MCInst TmpInst;
5544    // Shuffle the operands around so the lane index operand is in the
5545    // right place.
5546    unsigned Spacing;
5547    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5548    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5549    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5550    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5551    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5552                                            Spacing));
5553    TmpInst.addOperand(Inst.getOperand(1)); // lane
5554    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5555    TmpInst.addOperand(Inst.getOperand(5));
5556    Inst = TmpInst;
5557    return true;
5558  }
5559  // Handle NEON VLD complex aliases.
5560  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5561  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5562  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5563  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5564  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5565  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5566  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5567  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5568    MCInst TmpInst;
5569    // Shuffle the operands around so the lane index operand is in the
5570    // right place.
5571    unsigned Spacing;
5572    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5573    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5574    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5575    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5576    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5577    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5578    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5579    TmpInst.addOperand(Inst.getOperand(1)); // lane
5580    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5581    TmpInst.addOperand(Inst.getOperand(6));
5582    Inst = TmpInst;
5583    return true;
5584  }
5585
5586  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
5587  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
5588  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
5589  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5590  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5591  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5592  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5593  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5594  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5595  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5596  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
5597  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
5598  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
5599  case ARM::VLD2LNqWB_register_Asm_U32: {
5600    MCInst TmpInst;
5601    // Shuffle the operands around so the lane index operand is in the
5602    // right place.
5603    unsigned Spacing;
5604    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5605    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5606    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5607                                            Spacing));
5608    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5609    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5610    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5611    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5612    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5613    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5614                                            Spacing));
5615    TmpInst.addOperand(Inst.getOperand(1)); // lane
5616    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5617    TmpInst.addOperand(Inst.getOperand(6));
5618    Inst = TmpInst;
5619    return true;
5620  }
5621
5622  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5623  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5624  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5625  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5626  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5627  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5628  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5629  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5630    MCInst TmpInst;
5631    // Shuffle the operands around so the lane index operand is in the
5632    // right place.
5633    unsigned Spacing;
5634    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5635    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5636    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5637    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5638    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5639    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5640    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5641    TmpInst.addOperand(Inst.getOperand(1)); // lane
5642    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5643    TmpInst.addOperand(Inst.getOperand(5));
5644    Inst = TmpInst;
5645    return true;
5646  }
5647
5648  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
5649  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
5650  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
5651  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5652  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5653  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5654  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5655  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5656  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5657  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5658  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
5659  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
5660  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
5661  case ARM::VLD2LNqWB_fixed_Asm_U32: {
5662    MCInst TmpInst;
5663    // Shuffle the operands around so the lane index operand is in the
5664    // right place.
5665    unsigned Spacing;
5666    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5667    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5668    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5669                                            Spacing));
5670    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5671    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5672    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5673    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5674    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5675    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5676                                            Spacing));
5677    TmpInst.addOperand(Inst.getOperand(1)); // lane
5678    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5679    TmpInst.addOperand(Inst.getOperand(5));
5680    Inst = TmpInst;
5681    return true;
5682  }
5683
5684  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
5685  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
5686  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5687  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5688  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5689  case ARM::VLD1LNdAsm_U32: {
5690    MCInst TmpInst;
5691    // Shuffle the operands around so the lane index operand is in the
5692    // right place.
5693    unsigned Spacing;
5694    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5695    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5696    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5697    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5698    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5699    TmpInst.addOperand(Inst.getOperand(1)); // lane
5700    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5701    TmpInst.addOperand(Inst.getOperand(5));
5702    Inst = TmpInst;
5703    return true;
5704  }
5705
5706  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
5707  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
5708  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5709  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5710  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5711  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5712  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
5713  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
5714  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
5715  case ARM::VLD2LNqAsm_U32: {
5716    MCInst TmpInst;
5717    // Shuffle the operands around so the lane index operand is in the
5718    // right place.
5719    unsigned Spacing;
5720    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5721    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5722    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5723                                            Spacing));
5724    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5725    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5726    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5727    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5728                                            Spacing));
5729    TmpInst.addOperand(Inst.getOperand(1)); // lane
5730    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5731    TmpInst.addOperand(Inst.getOperand(5));
5732    Inst = TmpInst;
5733    return true;
5734  }
5735  // Handle the Thumb2 mode MOV complex aliases.
5736  case ARM::t2MOVsr:
5737  case ARM::t2MOVSsr: {
5738    // Which instruction to expand to depends on the CCOut operand and,
5739    // if the register operands are low registers, on whether we're in
5740    // an IT block.
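    // E.g. (illustrative, assuming unified-syntax input): outside an IT block
    // a flag-setting "movs r0, r0, lsl r1" can narrow to the 16-bit tLSLrr
    // form ("lsls r0, r1"), while the non-flag-setting "mov r0, r0, lsl r1"
    // must stay wide there.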
5741    bool isNarrow = false;
5742    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5743        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5744        isARMLowRegister(Inst.getOperand(2).getReg()) &&
5745        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
5746        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
5747      isNarrow = true;
5748    MCInst TmpInst;
5749    unsigned newOpc;
5750    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
5751    default: llvm_unreachable("unexpected opcode!");
5752    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
5753    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
5754    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
5755    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
5756    }
5757    TmpInst.setOpcode(newOpc);
5758    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5759    if (isNarrow)
5760      TmpInst.addOperand(MCOperand::CreateReg(
5761          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5762    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5763    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5764    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5765    TmpInst.addOperand(Inst.getOperand(5));
5766    if (!isNarrow)
5767      TmpInst.addOperand(MCOperand::CreateReg(
5768          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5769    Inst = TmpInst;
5770    return true;
5771  }
5772  case ARM::t2MOVsi:
5773  case ARM::t2MOVSsi: {
5774    // Which instruction to expand to depends on the CCOut operand and,
5775    // if the register operands are low registers, on whether we're in
5776    // an IT block.
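    // E.g. (illustrative): outside an IT block "movs r0, r1, lsl #2" can use
    // the narrow tLSLri encoding ("lsls r0, r1, #2"), whereas the
    // non-flag-setting "mov r0, r1, lsl #2" keeps the wide t2LSLri form there.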
5777    bool isNarrow = false;
5778    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5779        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5780        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5781      isNarrow = true;
5782    MCInst TmpInst;
5783    unsigned newOpc;
5784    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5785    default: llvm_unreachable("unexpected opcode!");
5786    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5787    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5788    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5789    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5790    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
5791    }
5792    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5793    if (Amount == 32) Amount = 0;
5794    TmpInst.setOpcode(newOpc);
5795    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5796    if (isNarrow)
5797      TmpInst.addOperand(MCOperand::CreateReg(
5798          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5799    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5800    if (newOpc != ARM::t2RRX)
5801      TmpInst.addOperand(MCOperand::CreateImm(Amount));
5802    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5803    TmpInst.addOperand(Inst.getOperand(4));
5804    if (!isNarrow)
5805      TmpInst.addOperand(MCOperand::CreateReg(
5806          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5807    Inst = TmpInst;
5808    return true;
5809  }
5810  // Handle the ARM mode MOV complex aliases.
5811  case ARM::ASRr:
5812  case ARM::LSRr:
5813  case ARM::LSLr:
5814  case ARM::RORr: {
5815    ARM_AM::ShiftOpc ShiftTy;
5816    switch(Inst.getOpcode()) {
5817    default: llvm_unreachable("unexpected opcode!");
5818    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5819    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5820    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5821    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5822    }
5823    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5824    MCInst TmpInst;
5825    TmpInst.setOpcode(ARM::MOVsr);
5826    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5827    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5828    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5829    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5830    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5831    TmpInst.addOperand(Inst.getOperand(4));
5832    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5833    Inst = TmpInst;
5834    return true;
5835  }
5836  case ARM::ASRi:
5837  case ARM::LSRi:
5838  case ARM::LSLi:
5839  case ARM::RORi: {
5840    ARM_AM::ShiftOpc ShiftTy;
5841    switch(Inst.getOpcode()) {
5842    default: llvm_unreachable("unexpected opcode!");
5843    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5844    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5845    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5846    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5847    }
5848    // A shift by zero is a plain MOVr, not a MOVsi.
5849    unsigned Amt = Inst.getOperand(2).getImm();
5850    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5851    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5852    MCInst TmpInst;
5853    TmpInst.setOpcode(Opc);
5854    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5855    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5856    if (Opc == ARM::MOVsi)
5857      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5858    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5859    TmpInst.addOperand(Inst.getOperand(4));
5860    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5861    Inst = TmpInst;
5862    return true;
5863  }
5864  case ARM::RRXi: {
5865    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5866    MCInst TmpInst;
5867    TmpInst.setOpcode(ARM::MOVsi);
5868    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5869    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5870    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5871    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5872    TmpInst.addOperand(Inst.getOperand(3));
5873    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5874    Inst = TmpInst;
5875    return true;
5876  }
5877  case ARM::t2LDMIA_UPD: {
5878    // If this is a load of a single register, then we should use
5879    // a post-indexed LDR instruction instead, per the ARM ARM.
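    // E.g., "ldmia r3!, {r4}" is emitted as the equivalent post-indexed
    // "ldr r4, [r3], #4".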
5880    if (Inst.getNumOperands() != 5)
5881      return false;
5882    MCInst TmpInst;
5883    TmpInst.setOpcode(ARM::t2LDR_POST);
5884    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5885    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5886    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5887    TmpInst.addOperand(MCOperand::CreateImm(4));
5888    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5889    TmpInst.addOperand(Inst.getOperand(3));
5890    Inst = TmpInst;
5891    return true;
5892  }
5893  case ARM::t2STMDB_UPD: {
5894    // If this is a store of a single register, then we should use
5895    // a pre-indexed STR instruction instead, per the ARM ARM.
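    // E.g., "stmdb r3!, {r4}" is emitted as the equivalent pre-indexed
    // "str r4, [r3, #-4]!".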
5896    if (Inst.getNumOperands() != 5)
5897      return false;
5898    MCInst TmpInst;
5899    TmpInst.setOpcode(ARM::t2STR_PRE);
5900    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5901    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5902    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5903    TmpInst.addOperand(MCOperand::CreateImm(-4));
5904    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5905    TmpInst.addOperand(Inst.getOperand(3));
5906    Inst = TmpInst;
5907    return true;
5908  }
5909  case ARM::LDMIA_UPD:
5910    // If this is a load of a single register via a 'pop', then we should use
5911    // a post-indexed LDR instruction instead, per the ARM ARM.
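    // E.g., in ARM mode "pop {r0}" is emitted as "ldr r0, [sp], #4".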
5912    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5913        Inst.getNumOperands() == 5) {
5914      MCInst TmpInst;
5915      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5916      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5917      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5918      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5919      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5920      TmpInst.addOperand(MCOperand::CreateImm(4));
5921      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5922      TmpInst.addOperand(Inst.getOperand(3));
5923      Inst = TmpInst;
5924      return true;
5925    }
5926    break;
5927  case ARM::STMDB_UPD:
5928    // If this is a store of a single register via a 'push', then we should use
5929    // a pre-indexed STR instruction instead, per the ARM ARM.
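    // E.g., in ARM mode "push {r0}" is emitted as "str r0, [sp, #-4]!".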
5930    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5931        Inst.getNumOperands() == 5) {
5932      MCInst TmpInst;
5933      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5934      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5935      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5936      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5937      TmpInst.addOperand(MCOperand::CreateImm(-4));
5938      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5939      TmpInst.addOperand(Inst.getOperand(3));
5940      Inst = TmpInst;
5941    }
5942    break;
5943  case ARM::t2ADDri12:
5944    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5945    // mnemonic was used (not "addw"), encoding T3 is preferred.
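    // E.g., an immediate of 255 can be encoded as a T2 modified immediate,
    // so encoding T3 (t2ADDri) is used; 4095 cannot, so the "addw"
    // (t2ADDri12) form is kept.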
5946    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5947        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5948      break;
5949    Inst.setOpcode(ARM::t2ADDri);
5950    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5951    break;
5952  case ARM::t2SUBri12:
5953    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5954    // mnemonic was used (not "subw"), encoding T3 is preferred.
5955    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5956        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5957      break;
5958    Inst.setOpcode(ARM::t2SUBri);
5959    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5960    break;
5961  case ARM::tADDi8:
5962    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5963    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5964    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5965    // to encoding T1 if <Rd> is omitted."
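    // E.g., "adds r1, r1, #3" (Rd written explicitly, immediate in 0-7)
    // prefers encoding T1 (tADDi3), while "adds r1, #3" keeps T2 (tADDi8).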
5966    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5967      Inst.setOpcode(ARM::tADDi3);
5968      return true;
5969    }
5970    break;
5971  case ARM::tSUBi8:
5972    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5973    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5974    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5975    // to encoding T1 if <Rd> is omitted."
5976    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5977      Inst.setOpcode(ARM::tSUBi3);
5978      return true;
5979    }
5980    break;
5981  case ARM::t2ADDrr: {
5982    // If the destination and first source operand are the same, and
5983    // there's no setting of the flags, use encoding T2 instead of T3.
5984    // Note that this is only for ADD, not SUB. This mirrors the system
5985    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
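    // E.g. (illustrative): "add r2, r2, r8" (Rd == Rn, no flags, no ".w")
    // ends up using the 16-bit tADDhirr encoding, while "add.w r2, r2, r8"
    // stays as t2ADDrr.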
5986    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5987        Inst.getOperand(5).getReg() != 0 ||
5988        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5989         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5990      break;
5991    MCInst TmpInst;
5992    TmpInst.setOpcode(ARM::tADDhirr);
5993    TmpInst.addOperand(Inst.getOperand(0));
5994    TmpInst.addOperand(Inst.getOperand(0));
5995    TmpInst.addOperand(Inst.getOperand(2));
5996    TmpInst.addOperand(Inst.getOperand(3));
5997    TmpInst.addOperand(Inst.getOperand(4));
5998    Inst = TmpInst;
5999    return true;
6000  }
6001  case ARM::tB:
6002    // A Thumb conditional branch outside of an IT block is a tBcc.
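    // E.g., "bne label" outside an IT block needs the conditional-branch
    // encoding (tBcc); inside an IT block the condition comes from the IT
    // instruction and the plain tB encoding is kept.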
6003    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6004      Inst.setOpcode(ARM::tBcc);
6005      return true;
6006    }
6007    break;
6008  case ARM::t2B:
6009    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6010    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6011      Inst.setOpcode(ARM::t2Bcc);
6012      return true;
6013    }
6014    break;
6015  case ARM::t2Bcc:
6016    // If the conditional is AL or we're in an IT block, we really want t2B.
6017    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6018      Inst.setOpcode(ARM::t2B);
6019      return true;
6020    }
6021    break;
6022  case ARM::tBcc:
6023    // If the conditional is AL, we really want tB.
6024    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6025      Inst.setOpcode(ARM::tB);
6026      return true;
6027    }
6028    break;
6029  case ARM::tLDMIA: {
6030    // If the register list contains any high registers, or if the writeback
6031    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6032    // instead if we're in Thumb2. Otherwise, this should have generated
6033    // an error in validateInstruction().
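    // E.g., "ldmia r0, {r1, r2}" (no writeback, base not in the list) and
    // "ldmia r0!, {r1, r8}" (high register) both need the 32-bit form.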
6034    unsigned Rn = Inst.getOperand(0).getReg();
6035    bool hasWritebackToken =
6036      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6037       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6038    bool listContainsBase;
6039    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6040        (!listContainsBase && !hasWritebackToken) ||
6041        (listContainsBase && hasWritebackToken)) {
6042      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6043      assert (isThumbTwo());
6044      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6045      // If we're switching to the updating version, we need to insert
6046      // the writeback tied operand.
6047      if (hasWritebackToken)
6048        Inst.insert(Inst.begin(),
6049                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6050      return true;
6051    }
6052    break;
6053  }
6054  case ARM::tSTMIA_UPD: {
6055    // If the register list contains any high registers, we need to use
6056    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6057    // should have generated an error in validateInstruction().
6058    unsigned Rn = Inst.getOperand(0).getReg();
6059    bool listContainsBase;
6060    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6061      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6062      assert (isThumbTwo());
6063      Inst.setOpcode(ARM::t2STMIA_UPD);
6064      return true;
6065    }
6066    break;
6067  }
6068  case ARM::tPOP: {
6069    bool listContainsBase;
6070    // If the register list contains any high registers, we need to use
6071    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6072    // should have generated an error in validateInstruction().
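    // E.g., "pop {r4, r8}" cannot be encoded as tPOP because of r8, so it
    // becomes "ldmia sp!, {r4, r8}" (t2LDMIA_UPD).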
6073    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6074      return false;
6075    assert (isThumbTwo());
6076    Inst.setOpcode(ARM::t2LDMIA_UPD);
6077    // Add the base register and writeback operands.
6078    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6079    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6080    return true;
6081  }
6082  case ARM::tPUSH: {
6083    bool listContainsBase;
6084    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6085      return false;
6086    assert (isThumbTwo());
6087    Inst.setOpcode(ARM::t2STMDB_UPD);
6088    // Add the base register and writeback operands.
6089    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6090    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6091    return true;
6092  }
6093  case ARM::t2MOVi: {
6094    // If we can use the 16-bit encoding and the user didn't explicitly
6095    // request the 32-bit variant, transform it here.
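    // E.g., outside an IT block "movs r3, #42" can use the 16-bit tMOVi8
    // encoding; "mov.w r3, #42" explicitly requests the 32-bit t2MOVi.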
6096    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6097        Inst.getOperand(1).getImm() <= 255 &&
6098        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6099         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6100        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6101        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6102         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6103      // The operands aren't in the same order for tMOVi8...
6104      MCInst TmpInst;
6105      TmpInst.setOpcode(ARM::tMOVi8);
6106      TmpInst.addOperand(Inst.getOperand(0));
6107      TmpInst.addOperand(Inst.getOperand(4));
6108      TmpInst.addOperand(Inst.getOperand(1));
6109      TmpInst.addOperand(Inst.getOperand(2));
6110      TmpInst.addOperand(Inst.getOperand(3));
6111      Inst = TmpInst;
6112      return true;
6113    }
6114    break;
6115  }
6116  case ARM::t2MOVr: {
6117    // If we can use the 16-bit encoding and the user didn't explicitly
6118    // request the 32-bit variant, transform it here.
6119    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6120        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6121        Inst.getOperand(2).getImm() == ARMCC::AL &&
6122        Inst.getOperand(4).getReg() == ARM::CPSR &&
6123        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6124         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6125      // The operands aren't the same for tMOV[S]r... (no cc_out)
6126      MCInst TmpInst;
6127      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6128      TmpInst.addOperand(Inst.getOperand(0));
6129      TmpInst.addOperand(Inst.getOperand(1));
6130      TmpInst.addOperand(Inst.getOperand(2));
6131      TmpInst.addOperand(Inst.getOperand(3));
6132      Inst = TmpInst;
6133      return true;
6134    }
6135    break;
6136  }
6137  case ARM::t2SXTH:
6138  case ARM::t2SXTB:
6139  case ARM::t2UXTH:
6140  case ARM::t2UXTB: {
6141    // If we can use the 16-bit encoding and the user didn't explicitly
6142    // request the 32-bit variant, transform it here.
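    // E.g., "sxtb r0, r1" (no rotation) narrows to tSXTB, while
    // "sxtb r0, r1, ror #8" keeps the 32-bit t2SXTB form.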
6143    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6144        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6145        Inst.getOperand(2).getImm() == 0 &&
6146        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6147         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6148      unsigned NewOpc;
6149      switch (Inst.getOpcode()) {
6150      default: llvm_unreachable("Illegal opcode!");
6151      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6152      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6153      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6154      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6155      }
6156      // The operands aren't the same for thumb1 (no rotate operand).
6157      MCInst TmpInst;
6158      TmpInst.setOpcode(NewOpc);
6159      TmpInst.addOperand(Inst.getOperand(0));
6160      TmpInst.addOperand(Inst.getOperand(1));
6161      TmpInst.addOperand(Inst.getOperand(3));
6162      TmpInst.addOperand(Inst.getOperand(4));
6163      Inst = TmpInst;
6164      return true;
6165    }
6166    break;
6167  }
6168  case ARM::MOVsi: {
6169    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6170    if (SOpc == ARM_AM::rrx) return false;
6171    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6172      // Shifting by zero is accepted as a vanilla 'MOVr'
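      // E.g., "mov r0, r1, lsl #0" is emitted as plain "mov r0, r1".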
6173      MCInst TmpInst;
6174      TmpInst.setOpcode(ARM::MOVr);
6175      TmpInst.addOperand(Inst.getOperand(0));
6176      TmpInst.addOperand(Inst.getOperand(1));
6177      TmpInst.addOperand(Inst.getOperand(3));
6178      TmpInst.addOperand(Inst.getOperand(4));
6179      TmpInst.addOperand(Inst.getOperand(5));
6180      Inst = TmpInst;
6181      return true;
6182    }
6183    return false;
6184  }
6185  case ARM::ANDrsi:
6186  case ARM::ORRrsi:
6187  case ARM::EORrsi:
6188  case ARM::BICrsi:
6189  case ARM::SUBrsi:
6190  case ARM::ADDrsi: {
6191    unsigned newOpc;
6192    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
6193    if (SOpc == ARM_AM::rrx) return false;
6194    switch (Inst.getOpcode()) {
6195    default: llvm_unreachable("unexpected opcode!");
6196    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
6197    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
6198    case ARM::EORrsi: newOpc = ARM::EORrr; break;
6199    case ARM::BICrsi: newOpc = ARM::BICrr; break;
6200    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
6201    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
6202    }
6203    // If the shift is by zero, use the non-shifted instruction definition.
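    // E.g., "and r0, r1, r2, lsl #0" is emitted as plain "and r0, r1, r2".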
6204    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
6205      MCInst TmpInst;
6206      TmpInst.setOpcode(newOpc);
6207      TmpInst.addOperand(Inst.getOperand(0));
6208      TmpInst.addOperand(Inst.getOperand(1));
6209      TmpInst.addOperand(Inst.getOperand(2));
6210      TmpInst.addOperand(Inst.getOperand(4));
6211      TmpInst.addOperand(Inst.getOperand(5));
6212      TmpInst.addOperand(Inst.getOperand(6));
6213      Inst = TmpInst;
6214      return true;
6215    }
6216    return false;
6217  }
6218  case ARM::t2IT: {
6219    // In the architectural encoding, the mask bits for all but the first
6220    // condition are relative to the first condition code: a mask bit equal
6221    // to its low bit means 't'. The parser always builds the mask with 1
6222    // meaning 't', so XOR-toggle the bits above the terminating 1 when the
6223    // low bit of the condition code is zero. The encoding also expects that
6224    // low bit as bit 4 of the mask operand, so OR it in if needed.
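    // Worked example (informal): for "itte eq" the parser-built mask is
    // 0b1010 (1 = 't', terminating 1 after the last suffix). EQ has a zero
    // low bit, so the bits above the terminator are toggled, giving the
    // architectural mask 0b0110; for a condition with a one low bit (e.g.
    // NE) the mask is kept and 0x10 is ORed in instead.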
6225    MCOperand &MO = Inst.getOperand(1);
6226    unsigned Mask = MO.getImm();
6227    unsigned OrigMask = Mask;
6228    unsigned TZ = CountTrailingZeros_32(Mask);
6229    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6230      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6231      for (unsigned i = 3; i != TZ; --i)
6232        Mask ^= 1 << i;
6233    } else
6234      Mask |= 0x10;
6235    MO.setImm(Mask);
6236
6237    // Set up the IT block state according to the IT instruction we just
6238    // matched.
6239    assert(!inITBlock() && "nested IT blocks?!");
6240    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6241    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6242    ITState.CurPosition = 0;
6243    ITState.FirstCond = true;
6244    break;
6245  }
6246  }
6247  return false;
6248}
6249
6250unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6251  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6252  // suffix depending on whether they're in an IT block or not.
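  // E.g., for a 16-bit ADD (register): Thumb1 only accepts the flag-setting
  // form, while Thumb2 requires the non-flag-setting form to be inside an IT
  // block and the flag-setting form to be outside one.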
6253  unsigned Opc = Inst.getOpcode();
6254  const MCInstrDesc &MCID = getInstDesc(Opc);
6255  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6256    assert(MCID.hasOptionalDef() &&
6257           "optionally flag setting instruction missing optional def operand");
6258    assert(MCID.NumOperands == Inst.getNumOperands() &&
6259           "operand count mismatch!");
6260    // Find the optional-def operand (cc_out).
6261    unsigned OpNo;
6262    for (OpNo = 0;
6263         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
6264         ++OpNo)
6265      ;
6266    // If we're parsing Thumb1, reject it completely.
6267    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6268      return Match_MnemonicFail;
6269    // If we're parsing Thumb2, which form is legal depends on whether we're
6270    // in an IT block.
6271    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6272        !inITBlock())
6273      return Match_RequiresITBlock;
6274    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6275        inITBlock())
6276      return Match_RequiresNotITBlock;
6277  }
6278  // Some Thumb1 encodings that support high registers (tADDhirr here) only
6279  // allow both registers to be low (r0-r7) when assembling for Thumb2.
6280  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6281           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6282           isARMLowRegister(Inst.getOperand(2).getReg()))
6283    return Match_RequiresThumb2;
6284  // Others only require ARMv6 or later.
6285  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6286           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6287           isARMLowRegister(Inst.getOperand(1).getReg()))
6288    return Match_RequiresV6;
6289  return Match_Success;
6290}
6291
6292bool ARMAsmParser::
6293MatchAndEmitInstruction(SMLoc IDLoc,
6294                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
6295                        MCStreamer &Out) {
6296  MCInst Inst;
6297  unsigned ErrorInfo;
6298  unsigned MatchResult;
6299  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
6300  switch (MatchResult) {
6301  default: break;
6302  case Match_Success:
6303    // Context sensitive operand constraints aren't handled by the matcher,
6304    // so check them here.
6305    if (validateInstruction(Inst, Operands)) {
6306      // Still progress the IT block, otherwise one wrong condition causes
6307      // nasty cascading errors.
6308      forwardITPosition();
6309      return true;
6310    }
6311
6312    // Some instructions need post-processing to, for example, tweak which
6313    // encoding is selected. Loop on it while changes happen so the
6314    // individual transformations can chain off each other. E.g.,
6315    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
6316    while (processInstruction(Inst, Operands))
6317      ;
6318
6319    // Only move forward at the very end so that everything in validate
6320    // and process gets a consistent answer about whether we're in an IT
6321    // block.
6322    forwardITPosition();
6323
6324    Out.EmitInstruction(Inst);
6325    return false;
6326  case Match_MissingFeature:
6327    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
6328    return true;
6329  case Match_InvalidOperand: {
6330    SMLoc ErrorLoc = IDLoc;
6331    if (ErrorInfo != ~0U) {
6332      if (ErrorInfo >= Operands.size())
6333        return Error(IDLoc, "too few operands for instruction");
6334
6335      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
6336      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6337    }
6338
6339    return Error(ErrorLoc, "invalid operand for instruction");
6340  }
6341  case Match_MnemonicFail:
6342    return Error(IDLoc, "invalid instruction");
6343  case Match_ConversionFail:
6344    // The converter function will have already emitted a diagnostic.
6345    return true;
6346  case Match_RequiresNotITBlock:
6347    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6348  case Match_RequiresITBlock:
6349    return Error(IDLoc, "instruction only valid inside IT block");
6350  case Match_RequiresV6:
6351    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6352  case Match_RequiresThumb2:
6353    return Error(IDLoc, "instruction variant requires Thumb2");
6354  }
6355
6356  llvm_unreachable("Implement any new match types added!");
6357  return true;
6358}
6359
6360/// parseDirective parses the ARM-specific directives
6361bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6362  StringRef IDVal = DirectiveID.getIdentifier();
6363  if (IDVal == ".word")
6364    return parseDirectiveWord(4, DirectiveID.getLoc());
6365  else if (IDVal == ".thumb")
6366    return parseDirectiveThumb(DirectiveID.getLoc());
6367  else if (IDVal == ".arm")
6368    return parseDirectiveARM(DirectiveID.getLoc());
6369  else if (IDVal == ".thumb_func")
6370    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6371  else if (IDVal == ".code")
6372    return parseDirectiveCode(DirectiveID.getLoc());
6373  else if (IDVal == ".syntax")
6374    return parseDirectiveSyntax(DirectiveID.getLoc());
6375  else if (IDVal == ".unreq")
6376    return parseDirectiveUnreq(DirectiveID.getLoc());
6377  else if (IDVal == ".arch")
6378    return parseDirectiveArch(DirectiveID.getLoc());
6379  else if (IDVal == ".eabi_attribute")
6380    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6381  return true;
6382}
6383
6384/// parseDirectiveWord
6385///  ::= .word [ expression (, expression)* ]
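/// For example, ".word 0x11223344, label+4" emits two 4-byte values.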
6386bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6387  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6388    for (;;) {
6389      const MCExpr *Value;
6390      if (getParser().ParseExpression(Value))
6391        return true;
6392
6393      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6394
6395      if (getLexer().is(AsmToken::EndOfStatement))
6396        break;
6397
6398      // FIXME: Improve diagnostic.
6399      if (getLexer().isNot(AsmToken::Comma))
6400        return Error(L, "unexpected token in directive");
6401      Parser.Lex();
6402    }
6403  }
6404
6405  Parser.Lex();
6406  return false;
6407}
6408
6409/// parseDirectiveThumb
6410///  ::= .thumb
6411bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6412  if (getLexer().isNot(AsmToken::EndOfStatement))
6413    return Error(L, "unexpected token in directive");
6414  Parser.Lex();
6415
6416  if (!isThumb())
6417    SwitchMode();
6418  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6419  return false;
6420}
6421
6422/// parseDirectiveARM
6423///  ::= .arm
6424bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6425  if (getLexer().isNot(AsmToken::EndOfStatement))
6426    return Error(L, "unexpected token in directive");
6427  Parser.Lex();
6428
6429  if (isThumb())
6430    SwitchMode();
6431  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6432  return false;
6433}
6434
6435/// parseDirectiveThumbFunc
6436///  ::= .thumb_func symbol_name
6437bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6438  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6439  bool isMachO = MAI.hasSubsectionsViaSymbols();
6440  StringRef Name;
6441  bool needFuncName = true;
6442
6443  // Darwin asm has an (optional) function name after the .thumb_func
6444  // directive; ELF doesn't.
6445  if (isMachO) {
6446    const AsmToken &Tok = Parser.getTok();
6447    if (Tok.isNot(AsmToken::EndOfStatement)) {
6448      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6449        return Error(L, "unexpected token in .thumb_func directive");
6450      Name = Tok.getIdentifier();
6451      Parser.Lex(); // Consume the identifier token.
6452      needFuncName = false;
6453    }
6454  }
6455
6456  if (getLexer().isNot(AsmToken::EndOfStatement))
6457    return Error(L, "unexpected token in directive");
6458
6459  // Eat the end of statement and any blank lines that follow.
6460  while (getLexer().is(AsmToken::EndOfStatement))
6461    Parser.Lex();
6462
6463  // FIXME: assuming the function name will be on the line following .thumb_func.
6464  // We really should be checking the next symbol definition even if there's
6465  // stuff in between.
6466  if (needFuncName) {
6467    Name = Parser.getTok().getIdentifier();
6468  }
6469
6470  // Mark symbol as a thumb symbol.
6471  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6472  getParser().getStreamer().EmitThumbFunc(Func);
6473  return false;
6474}
6475
6476/// parseDirectiveSyntax
6477///  ::= .syntax unified | divided
6478bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6479  const AsmToken &Tok = Parser.getTok();
6480  if (Tok.isNot(AsmToken::Identifier))
6481    return Error(L, "unexpected token in .syntax directive");
6482  StringRef Mode = Tok.getString();
6483  if (Mode == "unified" || Mode == "UNIFIED")
6484    Parser.Lex();
6485  else if (Mode == "divided" || Mode == "DIVIDED")
6486    return Error(L, "'.syntax divided' arm asssembly not supported");
6487  else
6488    return Error(L, "unrecognized syntax mode in .syntax directive");
6489
6490  if (getLexer().isNot(AsmToken::EndOfStatement))
6491    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6492  Parser.Lex();
6493
6494  // TODO tell the MC streamer the mode
6495  // getParser().getStreamer().Emit???();
6496  return false;
6497}
6498
6499/// parseDirectiveCode
6500///  ::= .code 16 | 32
6501bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6502  const AsmToken &Tok = Parser.getTok();
6503  if (Tok.isNot(AsmToken::Integer))
6504    return Error(L, "unexpected token in .code directive");
6505  int64_t Val = Parser.getTok().getIntVal();
6506  if (Val == 16)
6507    Parser.Lex();
6508  else if (Val == 32)
6509    Parser.Lex();
6510  else
6511    return Error(L, "invalid operand to .code directive");
6512
6513  if (getLexer().isNot(AsmToken::EndOfStatement))
6514    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6515  Parser.Lex();
6516
6517  if (Val == 16) {
6518    if (!isThumb())
6519      SwitchMode();
6520    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6521  } else {
6522    if (isThumb())
6523      SwitchMode();
6524    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6525  }
6526
6527  return false;
6528}
6529
6530/// parseDirectiveReq
6531///  ::= name .req registername
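/// For example, after "fp .req r11", "fp" can be used wherever the register
/// r11 is expected; ".unreq fp" removes the alias again.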
6532bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6533  Parser.Lex(); // Eat the '.req' token.
6534  unsigned Reg;
6535  SMLoc SRegLoc, ERegLoc;
6536  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6537    Parser.EatToEndOfStatement();
6538    return Error(SRegLoc, "register name expected");
6539  }
6540
6541  // Shouldn't be anything else.
6542  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6543    Parser.EatToEndOfStatement();
6544    return Error(Parser.getTok().getLoc(),
6545                 "unexpected input in .req directive.");
6546  }
6547
6548  Parser.Lex(); // Consume the EndOfStatement
6549
6550  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6551    return Error(SRegLoc, "redefinition of '" + Name +
6552                          "' does not match original.");
6553
6554  return false;
6555}
6556
6557/// parseDirectiveUnreq
6558///  ::= .unreq registername
6559bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6560  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6561    Parser.EatToEndOfStatement();
6562    return Error(L, "unexpected input in .unreq directive.");
6563  }
6564  RegisterReqs.erase(Parser.getTok().getIdentifier());
6565  Parser.Lex(); // Eat the identifier.
6566  return false;
6567}
6568
6569/// parseDirectiveArch
6570///  ::= .arch token
6571bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
6572  return true;
6573}
6574
6575/// parseDirectiveEabiAttr
6576///  ::= .eabi_attribute int, int
6577bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
6578  return true;
6579}
6580
6581extern "C" void LLVMInitializeARMAsmLexer();
6582
6583/// Force static initialization.
6584extern "C" void LLVMInitializeARMAsmParser() {
6585  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6586  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6587  LLVMInitializeARMAsmLexer();
6588}
6589
6590#define GET_REGISTER_MATCHER
6591#define GET_MATCHER_IMPLEMENTATION
6592#include "ARMGenAsmMatcher.inc"
6593