ARMAsmParser.cpp revision 2dd674fdce68f8fd59d78a3bbab2cf5b8d220290
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
/// ARMAsmParser - Target-specific assembly parser for ARM/Thumb. It parses
/// operands, splits mnemonic suffixes (predication, 's' flag, IT masks),
/// tracks IT-block state, and feeds the tablegen-generated matcher.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;   // Subtarget feature bits (ARM vs Thumb, arch level).
  MCAsmParser &Parser;    // Generic assembly parser this target parser extends.

  // Map of register aliases defined via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // State for the Thumb IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // An IT block is active iff CurPosition has not been reset to ~0U.
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    // 5 - TZ is one past the count of instructions in the block (the IT
    // instruction itself occupies position 0).
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostics forwarded to the generic parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Operand-parsing helpers. The int-returning variants yield a register
  // number or -1 on failure; the bool-returning ones follow the usual
  // "true means error" MC parser convention.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Handlers for target-specific assembler directives (.thumb, .code,
  // .req/.unreq, etc.).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);

  // Split a full mnemonic into the base mnemonic plus any predication code,
  // carry-set ('s') suffix, processor-mode modifier, or IT mask it carries.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries, derived from STI's feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb mode and recompute the available-feature
  // mask used by the matcher.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers invoked by the generated matcher for operand
  // classes that need target-specific parsing.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match hooks: validate/legalize a matched MCInst before emission.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match-failure codes, continuing after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_FPImmediate,
274    k_MemBarrierOpt,
275    k_Memory,
276    k_PostIndexRegister,
277    k_MSRMask,
278    k_ProcIFlags,
279    k_VectorIndex,
280    k_Register,
281    k_RegisterList,
282    k_DPRRegisterList,
283    k_SPRRegisterList,
284    k_VectorList,
285    k_VectorListAllLanes,
286    k_VectorListIndexed,
287    k_ShiftedRegister,
288    k_ShiftedImmediate,
289    k_ShifterImmediate,
290    k_RotateImmediate,
291    k_BitfieldDescriptor,
292    k_Token
293  } Kind;
294
295  SMLoc StartLoc, EndLoc;
296  SmallVector<unsigned, 8> Registers;
297
298  union {
299    struct {
300      ARMCC::CondCodes Val;
301    } CC;
302
303    struct {
304      unsigned Val;
305    } Cop;
306
307    struct {
308      unsigned Val;
309    } CoprocOption;
310
311    struct {
312      unsigned Mask:4;
313    } ITMask;
314
315    struct {
316      ARM_MB::MemBOpt Val;
317    } MBOpt;
318
319    struct {
320      ARM_PROC::IFlags Val;
321    } IFlags;
322
323    struct {
324      unsigned Val;
325    } MMask;
326
327    struct {
328      const char *Data;
329      unsigned Length;
330    } Tok;
331
332    struct {
333      unsigned RegNum;
334    } Reg;
335
336    // A vector register list is a sequential list of 1 to 4 registers.
337    struct {
338      unsigned RegNum;
339      unsigned Count;
340      unsigned LaneIndex;
341      bool isDoubleSpaced;
342    } VectorList;
343
344    struct {
345      unsigned Val;
346    } VectorIndex;
347
348    struct {
349      const MCExpr *Val;
350    } Imm;
351
352    struct {
353      unsigned Val;       // encoded 8-bit representation
354    } FPImm;
355
356    /// Combined record for all forms of ARM address expressions.
357    struct {
358      unsigned BaseRegNum;
359      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
360      // was specified.
361      const MCConstantExpr *OffsetImm;  // Offset immediate value
362      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
363      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
364      unsigned ShiftImm;        // shift for OffsetReg.
365      unsigned Alignment;       // 0 = no alignment specified
366                                // n = alignment in bytes (2, 4, 8, 16, or 32)
367      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
368    } Memory;
369
370    struct {
371      unsigned RegNum;
372      bool isAdd;
373      ARM_AM::ShiftOpc ShiftTy;
374      unsigned ShiftImm;
375    } PostIdxReg;
376
377    struct {
378      bool isASR;
379      unsigned Imm;
380    } ShifterImm;
381    struct {
382      ARM_AM::ShiftOpc ShiftTy;
383      unsigned SrcReg;
384      unsigned ShiftReg;
385      unsigned ShiftImm;
386    } RegShiftedReg;
387    struct {
388      ARM_AM::ShiftOpc ShiftTy;
389      unsigned SrcReg;
390      unsigned ShiftImm;
391    } RegShiftedImm;
392    struct {
393      unsigned Imm;
394    } RotImm;
395    struct {
396      unsigned LSB;
397      unsigned Width;
398    } Bitfield;
399  };
400
401  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
402public:
  /// Copy constructor. The operand's payload lives in a union, so only the
  /// member selected by o.Kind is valid; dispatch on the kind and copy
  /// exactly that member. Registers (a SmallVector) lives outside the union
  /// and is copied only for the register-list kinds that use it.
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }
479
480  /// getStartLoc - Get the location of the first token of this operand.
481  SMLoc getStartLoc() const { return StartLoc; }
482  /// getEndLoc - Get the location of the last token of this operand.
483  SMLoc getEndLoc() const { return EndLoc; }
484
485  ARMCC::CondCodes getCondCode() const {
486    assert(Kind == k_CondCode && "Invalid access!");
487    return CC.Val;
488  }
489
490  unsigned getCoproc() const {
491    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
492    return Cop.Val;
493  }
494
495  StringRef getToken() const {
496    assert(Kind == k_Token && "Invalid access!");
497    return StringRef(Tok.Data, Tok.Length);
498  }
499
500  unsigned getReg() const {
501    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
502    return Reg.RegNum;
503  }
504
505  const SmallVectorImpl<unsigned> &getRegList() const {
506    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
507            Kind == k_SPRRegisterList) && "Invalid access!");
508    return Registers;
509  }
510
511  const MCExpr *getImm() const {
512    assert(isImm() && "Invalid access!");
513    return Imm.Val;
514  }
515
516  unsigned getFPImm() const {
517    assert(Kind == k_FPImmediate && "Invalid access!");
518    return FPImm.Val;
519  }
520
521  unsigned getVectorIndex() const {
522    assert(Kind == k_VectorIndex && "Invalid access!");
523    return VectorIndex.Val;
524  }
525
526  ARM_MB::MemBOpt getMemBarrierOpt() const {
527    assert(Kind == k_MemBarrierOpt && "Invalid access!");
528    return MBOpt.Val;
529  }
530
531  ARM_PROC::IFlags getProcIFlags() const {
532    assert(Kind == k_ProcIFlags && "Invalid access!");
533    return IFlags.Val;
534  }
535
536  unsigned getMSRMask() const {
537    assert(Kind == k_MSRMask && "Invalid access!");
538    return MMask.Val;
539  }
540
541  bool isCoprocNum() const { return Kind == k_CoprocNum; }
542  bool isCoprocReg() const { return Kind == k_CoprocReg; }
543  bool isCoprocOption() const { return Kind == k_CoprocOption; }
544  bool isCondCode() const { return Kind == k_CondCode; }
545  bool isCCOut() const { return Kind == k_CCOut; }
546  bool isITMask() const { return Kind == k_ITCondMask; }
547  bool isITCondCode() const { return Kind == k_CondCode; }
548  bool isImm() const { return Kind == k_Immediate; }
549  bool isFPImm() const { return Kind == k_FPImmediate; }
550  bool isFBits16() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 0 && Value <= 16;
556  }
557  bool isFBits32() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return Value >= 1 && Value <= 32;
563  }
564  bool isImm8s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
570  }
571  bool isImm0_1020s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
577  }
578  bool isImm0_508s4() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
584  }
585  bool isImm0_255() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 256;
591  }
592  bool isImm0_1() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 2;
598  }
599  bool isImm0_3() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 4;
605  }
606  bool isImm0_7() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 8;
612  }
613  bool isImm0_15() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 16;
619  }
620  bool isImm0_31() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 32;
626  }
627  bool isImm0_63() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value >= 0 && Value < 64;
633  }
634  bool isImm8() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 8;
640  }
641  bool isImm16() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 16;
647  }
648  bool isImm32() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value == 32;
654  }
655  bool isShrImm8() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 8;
661  }
662  bool isShrImm16() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 16;
668  }
669  bool isShrImm32() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 32;
675  }
676  bool isShrImm64() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value <= 64;
682  }
683  bool isImm1_7() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 8;
689  }
690  bool isImm1_15() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 16;
696  }
697  bool isImm1_31() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 32;
703  }
704  bool isImm1_16() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 17;
710  }
711  bool isImm1_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value > 0 && Value < 33;
717  }
718  bool isImm0_32() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 33;
724  }
725  bool isImm0_65535() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    if (!CE) return false;
729    int64_t Value = CE->getValue();
730    return Value >= 0 && Value < 65536;
731  }
732  bool isImm0_65535Expr() const {
733    if (!isImm()) return false;
734    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
735    // If it's not a constant expression, it'll generate a fixup and be
736    // handled later.
737    if (!CE) return true;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value < 65536;
740  }
741  bool isImm24bit() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value >= 0 && Value <= 0xffffff;
747  }
748  bool isImmThumbSR() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value > 0 && Value < 33;
754  }
755  bool isPKHLSLImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value >= 0 && Value < 32;
761  }
762  bool isPKHASRImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return Value > 0 && Value <= 32;
768  }
769  bool isARMSOImm() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(Value) != -1;
775  }
776  bool isARMSOImmNot() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(~Value) != -1;
782  }
783  bool isARMSOImmNeg() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getSOImmVal(-Value) != -1;
789  }
790  bool isT2SOImm() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(Value) != -1;
796  }
797  bool isT2SOImmNot() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(~Value) != -1;
803  }
804  bool isT2SOImmNeg() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return ARM_AM::getT2SOImmVal(-Value) != -1;
810  }
811  bool isSetEndImm() const {
812    if (!isImm()) return false;
813    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814    if (!CE) return false;
815    int64_t Value = CE->getValue();
816    return Value == 1 || Value == 0;
817  }
818  bool isReg() const { return Kind == k_Register; }
819  bool isRegList() const { return Kind == k_RegisterList; }
820  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
821  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
822  bool isToken() const { return Kind == k_Token; }
823  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
824  bool isMemory() const { return Kind == k_Memory; }
825  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
826  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
827  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
828  bool isRotImm() const { return Kind == k_RotateImmediate; }
829  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
830  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
831  bool isPostIdxReg() const {
832    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
833  }
834  bool isMemNoOffset(bool alignOK = false) const {
835    if (!isMemory())
836      return false;
837    // No offset of any kind.
838    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
839     (alignOK || Memory.Alignment == 0);
840  }
  // Offset-free memory operand that may carry an alignment specifier
  // (e.g. for NEON load/store addressing).
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
844  bool isAddrMode2() const {
845    if (!isMemory() || Memory.Alignment != 0) return false;
846    // Check for register offset.
847    if (Memory.OffsetRegNum) return true;
848    // Immediate offset in range [-4095, 4095].
849    if (!Memory.OffsetImm) return true;
850    int64_t Val = Memory.OffsetImm->getValue();
851    return Val > -4096 && Val < 4096;
852  }
853  bool isAM2OffsetImm() const {
854    if (!isImm()) return false;
855    // Immediate offset in range [-4095, 4095].
856    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
857    if (!CE) return false;
858    int64_t Val = CE->getValue();
859    return Val > -4096 && Val < 4096;
860  }
  // ARM addrmode3: base register with optional (unshifted) register offset
  // or an immediate offset in [-255, 255]; no alignment specifier.
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  // Stand-alone addrmode3 offset (post-indexed form): either an unshifted
  // register or an immediate in [-255, 255].
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    // A post-index register is only valid if it carries no shift.
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  // ARM addressing mode 5: [Rn, #+/-imm8*4], used by VFP load/store.
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    // INT32_MIN is the sentinel for #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
904  bool isMemTBB() const {
905    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
906        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
907      return false;
908    return true;
909  }
910  bool isMemTBH() const {
911    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
912        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
913        Memory.Alignment != 0 )
914      return false;
915    return true;
916  }
917  bool isMemRegOffset() const {
918    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
919      return false;
920    return true;
921  }
922  bool isT2MemRegOffset() const {
923    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
924        Memory.Alignment != 0)
925      return false;
926    // Only lsl #{0, 1, 2, 3} allowed.
927    if (Memory.ShiftType == ARM_AM::no_shift)
928      return true;
929    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
930      return false;
931    return true;
932  }
933  bool isMemThumbRR() const {
934    // Thumb reg+reg addressing is simple. Just two registers, a base and
935    // an offset. No shifts, negations or any other complicating factors.
936    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
937        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
938      return false;
939    return isARMLowRegister(Memory.BaseRegNum) &&
940      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
941  }
942  bool isMemThumbRIs4() const {
943    if (!isMemory() || Memory.OffsetRegNum != 0 ||
944        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
945      return false;
946    // Immediate offset, multiple of 4 in range [0, 124].
947    if (!Memory.OffsetImm) return true;
948    int64_t Val = Memory.OffsetImm->getValue();
949    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
950  }
  // Thumb [Rn, #imm] with a low base register (halfword loads/stores).
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62]. (The old comment
    // claimed a multiple of 4; the code checks Val % 2.)
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
960  bool isMemThumbRIs1() const {
961    if (!isMemory() || Memory.OffsetRegNum != 0 ||
962        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
963      return false;
964    // Immediate offset in range [0, 31].
965    if (!Memory.OffsetImm) return true;
966    int64_t Val = Memory.OffsetImm->getValue();
967    return Val >= 0 && Val <= 31;
968  }
969  bool isMemThumbSPI() const {
970    if (!isMemory() || Memory.OffsetRegNum != 0 ||
971        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
972      return false;
973    // Immediate offset, multiple of 4 in range [0, 1020].
974    if (!Memory.OffsetImm) return true;
975    int64_t Val = Memory.OffsetImm->getValue();
976    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
977  }
  // [Rn, #imm] with imm a multiple of 4 in [-1020, 1020] (e.g. LDRD/STRD
  // in Thumb2 encodings).
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
991  bool isMemImm0_1020s4Offset() const {
992    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
993      return false;
994    // Immediate offset a multiple of 4 in range [0, 1020].
995    if (!Memory.OffsetImm) return true;
996    int64_t Val = Memory.OffsetImm->getValue();
997    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
998  }
999  bool isMemImm8Offset() const {
1000    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1001      return false;
1002    // Immediate offset in range [-255, 255].
1003    if (!Memory.OffsetImm) return true;
1004    int64_t Val = Memory.OffsetImm->getValue();
1005    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1006  }
1007  bool isMemPosImm8Offset() const {
1008    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1009      return false;
1010    // Immediate offset in range [0, 255].
1011    if (!Memory.OffsetImm) return true;
1012    int64_t Val = Memory.OffsetImm->getValue();
1013    return Val >= 0 && Val < 256;
1014  }
  // [Rn, #-imm] with a strictly negative immediate in [-255, -1], or the
  // INT32_MIN sentinel for #-0.
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1]. Unlike the other predicates,
    // a missing immediate does NOT match: the offset must be explicit.
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
1023  bool isMemUImm12Offset() const {
1024    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1025      return false;
1026    // Immediate offset in range [0, 4095].
1027    if (!Memory.OffsetImm) return true;
1028    int64_t Val = Memory.OffsetImm->getValue();
1029    return (Val >= 0 && Val < 4096);
1030  }
  // [Rn, #imm] with imm in [-4095, 4095], or a label reference.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095]. INT32_MIN encodes #-0.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
1045  bool isPostIdxImm8() const {
1046    if (!isImm()) return false;
1047    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1048    if (!CE) return false;
1049    int64_t Val = CE->getValue();
1050    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1051  }
1052  bool isPostIdxImm8s4() const {
1053    if (!isImm()) return false;
1054    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1055    if (!CE) return false;
1056    int64_t Val = CE->getValue();
1057    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1058      (Val == INT32_MIN);
1059  }
1060
  // True if this operand is the mask operand of an MSR instruction.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  // True if this operand is a CPS interrupt-flags operand (a/i/f).
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1063
1064  // NEON operands.
  // Vector register list of consecutive (single-spaced) D registers.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  // Vector register list of every-other (double-spaced) D registers.
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  // List of exactly one D register.
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }
1075
  // List of two consecutive D registers.
  bool isVecListTwoD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }
1080
  // List of three consecutive D registers.
  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }
1085
  // List of four consecutive D registers.
  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }
1090
  // List of two double-spaced D registers (i.e. one Q-register pair).
  bool isVecListTwoQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }
1095
  // All-lanes vector list (e.g. "{d0[]}") with single spacing.
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }
  // All-lanes vector list (e.g. "{d0[]}") with double spacing.
  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }
  // All-lanes list of one D register.
  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }
1106
  // All-lanes list of two consecutive D registers.
  bool isVecListTwoDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }
1111
  // All-lanes list of two double-spaced D registers.
  bool isVecListTwoQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }
1116
  // Lane-indexed vector list (e.g. "{d0[2]}") with single spacing.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }
  // Lane-indexed vector list (e.g. "{d0[2]}") with double spacing.
  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
  // One D register with a byte (8-bit element) lane index in [0, 7].
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }
1127
  // One D register with a halfword (16-bit element) lane index in [0, 3].
  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }
1132
  // One D register with a word (32-bit element) lane index in [0, 1].
  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }
1137
  // Two consecutive D registers with a byte lane index in [0, 7].
  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }
1142
  // Two consecutive D registers with a halfword lane index in [0, 3].
  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }
1147
  // Two double-spaced D registers with a word lane index in [0, 1].
  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }
1152
  // Two double-spaced D registers with a halfword lane index in [0, 3].
  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }
1157
  // Two consecutive D registers with a word lane index in [0, 1].
  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }
1162
  // Lane index valid for 8-bit elements of a D register: [0, 7].
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  // Lane index valid for 16-bit elements of a D register: [0, 3].
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  // Lane index valid for 32-bit elements of a D register: [0, 1].
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }
1175
1176  bool isNEONi8splat() const {
1177    if (!isImm()) return false;
1178    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1179    // Must be a constant.
1180    if (!CE) return false;
1181    int64_t Value = CE->getValue();
1182    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1183    // value.
1184    return Value >= 0 && Value < 256;
1185  }
1186
  // An i16 splat immediate: one byte of the halfword holds the value and
  // the other byte is zero.
  bool isNEONi16splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    // NOTE(review): the upper range also admits values with both bytes
    // non-zero (e.g. 0x0102), which are not encodable i16 splats --
    // presumably the encoder assumes the low byte is zero; verify callers.
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }
1196
  // An i32 splat immediate: the value occupies a single byte position of
  // the word, with the other three bytes zero.
  bool isNEONi32splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    // NOTE(review): as with isNEONi16splat, each range also admits values
    // with more than one non-zero byte -- verify the encoder's assumption.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }
1209
  // An i32 immediate encodable by VMOV: single-byte forms, plus the
  // "00Xf"/"0Xff" forms (low byte(s) all-ones) that VMOV/VMVN allow.
  bool isNEONi32vmov() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
1225  bool isNEONi32vmovNeg() const {
1226    if (!isImm()) return false;
1227    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1228    // Must be a constant.
1229    if (!CE) return false;
1230    int64_t Value = ~CE->getValue();
1231    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1232    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1233    return (Value >= 0 && Value < 256) ||
1234      (Value >= 0x0100 && Value <= 0xff00) ||
1235      (Value >= 0x010000 && Value <= 0xff0000) ||
1236      (Value >= 0x01000000 && Value <= 0xff000000) ||
1237      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1238      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1239  }
1240
1241  bool isNEONi64splat() const {
1242    if (!isImm()) return false;
1243    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1244    // Must be a constant.
1245    if (!CE) return false;
1246    uint64_t Value = CE->getValue();
1247    // i64 value with each byte being either 0 or 0xff.
1248    for (unsigned i = 0; i < 8; ++i)
1249      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1250    return true;
1251  }
1252
  // Append Expr to Inst, folding constant expressions into plain
  // immediate operands.
  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }
1262
  // Emit the two-operand predicate: the condition code, then the register
  // the predicate reads (CPSR), or register 0 when unconditional (AL).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }
1269
  // Emit a coprocessor number (pN) as an immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }
1274
  // Emit a coprocessor register (cN) as an immediate.
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }
1279
  // Emit a coprocessor option ({imm}) as an immediate.
  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }
1284
  // Emit the 4-bit IT-block condition mask as an immediate.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }
1289
  // Emit the IT instruction's condition code as an immediate (no CPSR
  // register operand, unlike addCondCodeOperands).
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }
1294
  // Emit the optional flag-setting output register (CPSR or reg 0).
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1299
  // Emit a plain register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1304
  // Emit a register-shifted-register operand as the triple: source reg,
  // shift-amount reg, and the packed shift opcode (type + immediate).
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }
1314
  // Emit a register-shifted-by-immediate operand as the pair: source reg
  // and the packed shift opcode (type + immediate).
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }
1323
  // Emit a shifter immediate: bit 5 distinguishes ASR (1) from LSL (0),
  // the low bits hold the shift amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
1329
  // Emit every register of a register list as a separate reg operand.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }
1337
  // D-register lists are emitted exactly like general register lists.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
1341
  // S-register lists are emitted exactly like general register lists.
  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
1345
  // Emit a rotate immediate (0/8/16/24) in its encoded 2-bit form.
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }
1351
  // Emit a BFC/BFI bitfield descriptor as an inverted mask immediate.
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    // The double shift isolates bits [lsb, lsb+width) before complementing.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }
1362
  // Emit a generic immediate, folding constants via addExpr().
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
1367
  // Emit a 16-bit fixed-point fraction-bits operand, encoded as 16 - imm.
  // NOTE(review): dyn_cast result is not null-checked -- assumes the
  // matcher only routes constant immediates here; verify operand class.
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
  }
1373
  // Emit a 32-bit fixed-point fraction-bits operand, encoded as 32 - imm.
  // NOTE(review): dyn_cast result is not null-checked -- assumes the
  // matcher only routes constant immediates here; verify operand class.
  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
  }
1379
  // Emit a floating-point immediate in its encoded form.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }
1384
  // Emit an imm8s4 operand (multiple of 4) unscaled.
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }
1392
  // Emit an imm in [0, 1020] that is a multiple of 4, scaled down by 4.
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }
1400
  // Emit an imm in [0, 508] that is a multiple of 4, scaled down by 4.
  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }
1408
  // Emit an imm in [1, 16] in its encoded form (value minus one).
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }
1416
  // Emit an imm in [1, 32] in its encoded form (value minus one).
  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }
1424
  // Emit a Thumb shift-right amount: a shift of 32 is encoded as 0.
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }
1433
  // Emit a PKH ASR shift amount: a shift of 32 is encoded as 0.
  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }
1442
  // Emit the bitwise complement of the source immediate as a t2_so_imm
  // (used when aliasing e.g. MVN for MOV).
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }
1450
  // Emit the arithmetic negation of the source immediate as a t2_so_imm
  // (used when aliasing e.g. SUB for ADD with a negative immediate).
  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }
1458
  // Emit the bitwise complement of the source immediate as an ARM so_imm.
  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }
1466
  // Emit the arithmetic negation of the source immediate as an ARM so_imm.
  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }
1474
  // Emit a DMB/DSB/ISB barrier option as an immediate.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }
1479
  // Emit a bare [Rn] memory operand: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }
1484
  // Emit an aligned memory operand: base register plus alignment (bytes,
  // 0 meaning unspecified).
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }
1490
  // Emit an addrmode2 operand as the triple: base reg, offset reg (0 if
  // immediate form), and the packed AM2 opcode word (add/sub flag,
  // offset or shift amount, shift kind).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0: the INT32_MIN sentinel becomes a subtracted 0.
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1510
  // Emit a stand-alone addrmode2 offset immediate (post-indexed form):
  // a zero offset register plus the packed AM2 opcode word.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: the INT32_MIN sentinel becomes a subtracted 0.
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1524
  // Emit an addrmode3 operand as the triple: base reg, offset reg (0 if
  // immediate form), and the packed AM3 opcode word. A bare immediate is
  // a label reference and is emitted as an expression for later fixup.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateReg(0));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0: the INT32_MIN sentinel becomes a subtracted 0.
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1553
  // Emit a stand-alone addrmode3 offset (post-indexed form): either a
  // post-index register or a constant offset, as reg + packed AM3 word.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: the INT32_MIN sentinel becomes a subtracted 0.
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1575
  // Emit an addrmode5 operand: base reg plus the packed AM5 word with
  // the offset scaled to words (imm/4). A bare immediate is a label
  // reference and is emitted as an expression for later fixup.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0: the INT32_MIN sentinel becomes a subtracted 0.
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1597
  // Emit a [Rn, #imm8s4] operand: base reg plus the unscaled offset. A
  // bare immediate is a label reference emitted as an expression.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1613
  // Emit a [Rn, #imm0_1020s4] operand with the offset scaled down by 4.
  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1621
1622  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1623    assert(N == 2 && "Invalid number of operands!");
1624    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1625    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1626    Inst.addOperand(MCOperand::CreateImm(Val));
1627  }
1628
1629  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1630    addMemImm8OffsetOperands(Inst, N);
1631  }
1632
1633  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1634    addMemImm8OffsetOperands(Inst, N);
1635  }
1636
1637  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1638    assert(N == 2 && "Invalid number of operands!");
1639    // If this is an immediate, it's a label reference.
1640    if (isImm()) {
1641      addExpr(Inst, getImm());
1642      Inst.addOperand(MCOperand::CreateImm(0));
1643      return;
1644    }
1645
1646    // Otherwise, it's a normal memory reg+offset.
1647    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1648    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1649    Inst.addOperand(MCOperand::CreateImm(Val));
1650  }
1651
1652  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1653    assert(N == 2 && "Invalid number of operands!");
1654    // If this is an immediate, it's a label reference.
1655    if (isImm()) {
1656      addExpr(Inst, getImm());
1657      Inst.addOperand(MCOperand::CreateImm(0));
1658      return;
1659    }
1660
1661    // Otherwise, it's a normal memory reg+offset.
1662    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1663    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1664    Inst.addOperand(MCOperand::CreateImm(Val));
1665  }
1666
1667  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1668    assert(N == 2 && "Invalid number of operands!");
1669    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1670    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1671  }
1672
1673  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1674    assert(N == 2 && "Invalid number of operands!");
1675    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1676    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1677  }
1678
1679  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1680    assert(N == 3 && "Invalid number of operands!");
1681    unsigned Val =
1682      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1683                        Memory.ShiftImm, Memory.ShiftType);
1684    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1685    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1686    Inst.addOperand(MCOperand::CreateImm(Val));
1687  }
1688
1689  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1690    assert(N == 3 && "Invalid number of operands!");
1691    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1692    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1693    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1694  }
1695
1696  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1697    assert(N == 2 && "Invalid number of operands!");
1698    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1699    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1700  }
1701
1702  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1703    assert(N == 2 && "Invalid number of operands!");
1704    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1705    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1706    Inst.addOperand(MCOperand::CreateImm(Val));
1707  }
1708
1709  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1710    assert(N == 2 && "Invalid number of operands!");
1711    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1712    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1713    Inst.addOperand(MCOperand::CreateImm(Val));
1714  }
1715
1716  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1717    assert(N == 2 && "Invalid number of operands!");
1718    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1719    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1720    Inst.addOperand(MCOperand::CreateImm(Val));
1721  }
1722
1723  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1724    assert(N == 2 && "Invalid number of operands!");
1725    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1726    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1727    Inst.addOperand(MCOperand::CreateImm(Val));
1728  }
1729
  // Encode a post-indexed 8-bit immediate: the low 8 bits hold the
  // magnitude, bit 8 holds the direction (1 = add, 0 = subtract).
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN is how the parser represents #-0 (subtract, magnitude 0).
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1740
  // Encode a post-indexed 8-bit immediate scaled by 4: the low 8 bits hold
  // the magnitude in words, bit 8 holds the direction (1 = add, 0 = sub).
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN is how the parser represents #-0 (subtract, magnitude 0).
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1752
1753  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1754    assert(N == 2 && "Invalid number of operands!");
1755    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1756    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1757  }
1758
1759  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1760    assert(N == 2 && "Invalid number of operands!");
1761    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1762    // The sign, shift type, and shift amount are encoded in a single operand
1763    // using the AM2 encoding helpers.
1764    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1765    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1766                                     PostIdxReg.ShiftTy);
1767    Inst.addOperand(MCOperand::CreateImm(Imm));
1768  }
1769
1770  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1771    assert(N == 1 && "Invalid number of operands!");
1772    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1773  }
1774
1775  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1776    assert(N == 1 && "Invalid number of operands!");
1777    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1778  }
1779
1780  void addVecListOperands(MCInst &Inst, unsigned N) const {
1781    assert(N == 1 && "Invalid number of operands!");
1782    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1783  }
1784
1785  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1786    assert(N == 2 && "Invalid number of operands!");
1787    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1788    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1789  }
1790
1791  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1792    assert(N == 1 && "Invalid number of operands!");
1793    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1794  }
1795
1796  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1797    assert(N == 1 && "Invalid number of operands!");
1798    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1799  }
1800
1801  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1802    assert(N == 1 && "Invalid number of operands!");
1803    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1804  }
1805
  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // NOTE(review): CE is dereferenced without a null check; presumably the
    // matching predicate guarantees a constant here -- confirm.
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }
1813
  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // A value that fits in the low byte keeps it there (type bits 0x800);
    // otherwise the high byte is shifted down (type bits 0xa00).
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }
1825
  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // The non-zero byte is shifted down to the low byte, with type bits
    // selecting which byte position it originally occupied.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }
1839
  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Unlike the splat form, this also admits the "byte + ones below"
    // patterns (type bits 0xc00 / 0xd00) when the low byte(s) are set.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }
1853
  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Same encoding as addNEONi32vmovOperands, but applied to the
    // bitwise complement of the written constant (VMVN form).
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = ~CE->getValue();
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }
1867
  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    // Collapse each byte of the 64-bit value to one bit: bit i of the
    // encoded immediate is the low bit of byte i.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }
1879
1880  virtual void print(raw_ostream &OS) const;
1881
1882  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1883    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1884    Op->ITMask.Mask = Mask;
1885    Op->StartLoc = S;
1886    Op->EndLoc = S;
1887    return Op;
1888  }
1889
1890  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1891    ARMOperand *Op = new ARMOperand(k_CondCode);
1892    Op->CC.Val = CC;
1893    Op->StartLoc = S;
1894    Op->EndLoc = S;
1895    return Op;
1896  }
1897
1898  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1899    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1900    Op->Cop.Val = CopVal;
1901    Op->StartLoc = S;
1902    Op->EndLoc = S;
1903    return Op;
1904  }
1905
1906  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1907    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1908    Op->Cop.Val = CopVal;
1909    Op->StartLoc = S;
1910    Op->EndLoc = S;
1911    return Op;
1912  }
1913
1914  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1915    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1916    Op->Cop.Val = Val;
1917    Op->StartLoc = S;
1918    Op->EndLoc = E;
1919    return Op;
1920  }
1921
1922  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1923    ARMOperand *Op = new ARMOperand(k_CCOut);
1924    Op->Reg.RegNum = RegNum;
1925    Op->StartLoc = S;
1926    Op->EndLoc = S;
1927    return Op;
1928  }
1929
1930  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1931    ARMOperand *Op = new ARMOperand(k_Token);
1932    Op->Tok.Data = Str.data();
1933    Op->Tok.Length = Str.size();
1934    Op->StartLoc = S;
1935    Op->EndLoc = S;
1936    return Op;
1937  }
1938
1939  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1940    ARMOperand *Op = new ARMOperand(k_Register);
1941    Op->Reg.RegNum = RegNum;
1942    Op->StartLoc = S;
1943    Op->EndLoc = E;
1944    return Op;
1945  }
1946
1947  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1948                                           unsigned SrcReg,
1949                                           unsigned ShiftReg,
1950                                           unsigned ShiftImm,
1951                                           SMLoc S, SMLoc E) {
1952    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1953    Op->RegShiftedReg.ShiftTy = ShTy;
1954    Op->RegShiftedReg.SrcReg = SrcReg;
1955    Op->RegShiftedReg.ShiftReg = ShiftReg;
1956    Op->RegShiftedReg.ShiftImm = ShiftImm;
1957    Op->StartLoc = S;
1958    Op->EndLoc = E;
1959    return Op;
1960  }
1961
1962  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1963                                            unsigned SrcReg,
1964                                            unsigned ShiftImm,
1965                                            SMLoc S, SMLoc E) {
1966    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1967    Op->RegShiftedImm.ShiftTy = ShTy;
1968    Op->RegShiftedImm.SrcReg = SrcReg;
1969    Op->RegShiftedImm.ShiftImm = ShiftImm;
1970    Op->StartLoc = S;
1971    Op->EndLoc = E;
1972    return Op;
1973  }
1974
1975  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1976                                   SMLoc S, SMLoc E) {
1977    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1978    Op->ShifterImm.isASR = isASR;
1979    Op->ShifterImm.Imm = Imm;
1980    Op->StartLoc = S;
1981    Op->EndLoc = E;
1982    return Op;
1983  }
1984
1985  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1986    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1987    Op->RotImm.Imm = Imm;
1988    Op->StartLoc = S;
1989    Op->EndLoc = E;
1990    return Op;
1991  }
1992
1993  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1994                                    SMLoc S, SMLoc E) {
1995    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1996    Op->Bitfield.LSB = LSB;
1997    Op->Bitfield.Width = Width;
1998    Op->StartLoc = S;
1999    Op->EndLoc = E;
2000    return Op;
2001  }
2002
2003  static ARMOperand *
2004  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2005                SMLoc StartLoc, SMLoc EndLoc) {
2006    KindTy Kind = k_RegisterList;
2007
2008    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2009      Kind = k_DPRRegisterList;
2010    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2011             contains(Regs.front().first))
2012      Kind = k_SPRRegisterList;
2013
2014    ARMOperand *Op = new ARMOperand(Kind);
2015    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2016           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2017      Op->Registers.push_back(I->first);
2018    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2019    Op->StartLoc = StartLoc;
2020    Op->EndLoc = EndLoc;
2021    return Op;
2022  }
2023
2024  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2025                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2026    ARMOperand *Op = new ARMOperand(k_VectorList);
2027    Op->VectorList.RegNum = RegNum;
2028    Op->VectorList.Count = Count;
2029    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2030    Op->StartLoc = S;
2031    Op->EndLoc = E;
2032    return Op;
2033  }
2034
2035  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2036                                              bool isDoubleSpaced,
2037                                              SMLoc S, SMLoc E) {
2038    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2039    Op->VectorList.RegNum = RegNum;
2040    Op->VectorList.Count = Count;
2041    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2042    Op->StartLoc = S;
2043    Op->EndLoc = E;
2044    return Op;
2045  }
2046
2047  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2048                                             unsigned Index,
2049                                             bool isDoubleSpaced,
2050                                             SMLoc S, SMLoc E) {
2051    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2052    Op->VectorList.RegNum = RegNum;
2053    Op->VectorList.Count = Count;
2054    Op->VectorList.LaneIndex = Index;
2055    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2056    Op->StartLoc = S;
2057    Op->EndLoc = E;
2058    return Op;
2059  }
2060
2061  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2062                                       MCContext &Ctx) {
2063    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2064    Op->VectorIndex.Val = Idx;
2065    Op->StartLoc = S;
2066    Op->EndLoc = E;
2067    return Op;
2068  }
2069
2070  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2071    ARMOperand *Op = new ARMOperand(k_Immediate);
2072    Op->Imm.Val = Val;
2073    Op->StartLoc = S;
2074    Op->EndLoc = E;
2075    return Op;
2076  }
2077
2078  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2079    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2080    Op->FPImm.Val = Val;
2081    Op->StartLoc = S;
2082    Op->EndLoc = S;
2083    return Op;
2084  }
2085
2086  static ARMOperand *CreateMem(unsigned BaseRegNum,
2087                               const MCConstantExpr *OffsetImm,
2088                               unsigned OffsetRegNum,
2089                               ARM_AM::ShiftOpc ShiftType,
2090                               unsigned ShiftImm,
2091                               unsigned Alignment,
2092                               bool isNegative,
2093                               SMLoc S, SMLoc E) {
2094    ARMOperand *Op = new ARMOperand(k_Memory);
2095    Op->Memory.BaseRegNum = BaseRegNum;
2096    Op->Memory.OffsetImm = OffsetImm;
2097    Op->Memory.OffsetRegNum = OffsetRegNum;
2098    Op->Memory.ShiftType = ShiftType;
2099    Op->Memory.ShiftImm = ShiftImm;
2100    Op->Memory.Alignment = Alignment;
2101    Op->Memory.isNegative = isNegative;
2102    Op->StartLoc = S;
2103    Op->EndLoc = E;
2104    return Op;
2105  }
2106
2107  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2108                                      ARM_AM::ShiftOpc ShiftTy,
2109                                      unsigned ShiftImm,
2110                                      SMLoc S, SMLoc E) {
2111    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2112    Op->PostIdxReg.RegNum = RegNum;
2113    Op->PostIdxReg.isAdd = isAdd;
2114    Op->PostIdxReg.ShiftTy = ShiftTy;
2115    Op->PostIdxReg.ShiftImm = ShiftImm;
2116    Op->StartLoc = S;
2117    Op->EndLoc = E;
2118    return Op;
2119  }
2120
2121  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2122    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2123    Op->MBOpt.Val = Opt;
2124    Op->StartLoc = S;
2125    Op->EndLoc = S;
2126    return Op;
2127  }
2128
2129  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2130    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2131    Op->IFlags.Val = IFlags;
2132    Op->StartLoc = S;
2133    Op->EndLoc = S;
2134    return Op;
2135  }
2136
2137  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2138    ARMOperand *Op = new ARMOperand(k_MSRMask);
2139    Op->MMask.Val = MMask;
2140    Op->StartLoc = S;
2141    Op->EndLoc = S;
2142    return Op;
2143  }
2144};
2145
2146} // end anonymous namespace.
2147
/// print - Debug dump of this operand to OS. Each operand kind gets a short
/// angle-bracketed rendering.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Indexed by the 4-bit IT mask value.
    // NOTE(review): MaskStr has 15 entries (indices 0-14) but the assert
    // below admits Mask == 15 -- confirm mask 15 is unreachable here.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offsets/shifts are omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    // Print each set flag (a/i/f), highest bit first.
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}
2270
2271/// @name Auto-generated Match Functions
2272/// {
2273
2274static unsigned MatchRegisterName(StringRef Name);
2275
2276/// }
2277
2278bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2279                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2280  StartLoc = Parser.getTok().getLoc();
2281  RegNo = tryParseRegister();
2282  EndLoc = Parser.getTok().getLoc();
2283
2284  return (RegNo == (unsigned)-1);
2285}
2286
2287/// Try to parse a register name.  The token must be an Identifier when called,
2288/// and if it is a register name the token is eaten and the register number is
2289/// returned.  Otherwise return -1.
2290///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are case-insensitive; canonicalize to lower case before
  // any lookup.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Fall back to alternate spellings not covered by the generated matcher.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}
2337
2338// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
2339// If a recoverable error occurs, return 1. If an irrecoverable error
2340// occurs, return -1. An irrecoverable error is one where tokens have been
2341// consumed in the process of trying to parse the shifter (i.e., when it is
2342// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // "asl" is accepted as a gas-compatible alias for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic: recoverable, nothing consumed yet.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this path returns 1
    // ("recoverable") even though the shift operator has already been
    // eaten and the previous operand popped -- confirm -1 wasn't intended.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
                    "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Emit either a register-shifted-register or register-shifted-immediate
  // operand in place of the popped plain register.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}
2433
2434
2435/// Try to parse a register name.  The token must be an Identifier when called.
2436/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2437/// if there is a "writeback". 'true' if it's not a register.
2438///
2439/// TODO this is likely to change to allow different register types and or to
2440/// parse for a specific register type.
2441bool ARMAsmParser::
2442tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2443  SMLoc S = Parser.getTok().getLoc();
2444  int RegNo = tryParseRegister();
2445  if (RegNo == -1)
2446    return true;
2447
2448  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2449
2450  const AsmToken &ExclaimTok = Parser.getTok();
2451  if (ExclaimTok.is(AsmToken::Exclaim)) {
2452    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2453                                               ExclaimTok.getLoc()));
2454    Parser.Lex(); // Eat exclaim token
2455    return false;
2456  }
2457
2458  // Also check for an index operand. This is only legal for vector registers,
2459  // but that'll get caught OK in operand matching, so we don't need to
2460  // explicitly filter everything else out here.
2461  if (Parser.getTok().is(AsmToken::LBrac)) {
2462    SMLoc SIdx = Parser.getTok().getLoc();
2463    Parser.Lex(); // Eat left bracket token.
2464
2465    const MCExpr *ImmVal;
2466    if (getParser().ParseExpression(ImmVal))
2467      return MatchOperand_ParseFail;
2468    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2469    if (!MCE) {
2470      TokError("immediate value expected for vector index");
2471      return MatchOperand_ParseFail;
2472    }
2473
2474    SMLoc E = Parser.getTok().getLoc();
2475    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2476      Error(E, "']' expected");
2477      return MatchOperand_ParseFail;
2478    }
2479
2480    Parser.Lex(); // Eat right bracket token.
2481
2482    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2483                                                     SIdx, E,
2484                                                     getContext()));
2485  }
2486
2487  return false;
2488}
2489
2490/// MatchCoprocessorOperandName - Try to parse an coprocessor related
2491/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2492/// "c5", ...
2493static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2494  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2495  // but efficient.
2496  switch (Name.size()) {
2497  default: break;
2498  case 2:
2499    if (Name[0] != CoprocOp)
2500      return -1;
2501    switch (Name[1]) {
2502    default:  return -1;
2503    case '0': return 0;
2504    case '1': return 1;
2505    case '2': return 2;
2506    case '3': return 3;
2507    case '4': return 4;
2508    case '5': return 5;
2509    case '6': return 6;
2510    case '7': return 7;
2511    case '8': return 8;
2512    case '9': return 9;
2513    }
2514    break;
2515  case 3:
2516    if (Name[0] != CoprocOp || Name[1] != '1')
2517      return -1;
2518    switch (Name[2]) {
2519    default:  return -1;
2520    case '0': return 10;
2521    case '1': return 11;
2522    case '2': return 12;
2523    case '3': return 13;
2524    case '4': return 14;
2525    case '5': return 15;
2526    }
2527    break;
2528  }
2529
2530  return -1;
2531}
2532
2533/// parseITCondCode - Try to parse a condition code for an IT instruction.
2534ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2535parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2536  SMLoc S = Parser.getTok().getLoc();
2537  const AsmToken &Tok = Parser.getTok();
2538  if (!Tok.is(AsmToken::Identifier))
2539    return MatchOperand_NoMatch;
2540  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2541    .Case("eq", ARMCC::EQ)
2542    .Case("ne", ARMCC::NE)
2543    .Case("hs", ARMCC::HS)
2544    .Case("cs", ARMCC::HS)
2545    .Case("lo", ARMCC::LO)
2546    .Case("cc", ARMCC::LO)
2547    .Case("mi", ARMCC::MI)
2548    .Case("pl", ARMCC::PL)
2549    .Case("vs", ARMCC::VS)
2550    .Case("vc", ARMCC::VC)
2551    .Case("hi", ARMCC::HI)
2552    .Case("ls", ARMCC::LS)
2553    .Case("ge", ARMCC::GE)
2554    .Case("lt", ARMCC::LT)
2555    .Case("gt", ARMCC::GT)
2556    .Case("le", ARMCC::LE)
2557    .Case("al", ARMCC::AL)
2558    .Default(~0U);
2559  if (CC == ~0U)
2560    return MatchOperand_NoMatch;
2561  Parser.Lex(); // Eat the token.
2562
2563  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2564
2565  return MatchOperand_Success;
2566}
2567
2568/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
2569/// token must be an Identifier when called, and if it is a coprocessor
2570/// number, the token is eaten and the operand is added to the operand list.
2571ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2572parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2573  SMLoc S = Parser.getTok().getLoc();
2574  const AsmToken &Tok = Parser.getTok();
2575  if (Tok.isNot(AsmToken::Identifier))
2576    return MatchOperand_NoMatch;
2577
2578  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2579  if (Num == -1)
2580    return MatchOperand_NoMatch;
2581
2582  Parser.Lex(); // Eat identifier token.
2583  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2584  return MatchOperand_Success;
2585}
2586
2587/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
2588/// token must be an Identifier when called, and if it is a coprocessor
2589/// number, the token is eaten and the operand is added to the operand list.
2590ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2591parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2592  SMLoc S = Parser.getTok().getLoc();
2593  const AsmToken &Tok = Parser.getTok();
2594  if (Tok.isNot(AsmToken::Identifier))
2595    return MatchOperand_NoMatch;
2596
2597  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2598  if (Reg == -1)
2599    return MatchOperand_NoMatch;
2600
2601  Parser.Lex(); // Eat identifier token.
2602  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2603  return MatchOperand_Success;
2604}
2605
2606/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2607/// coproc_option : '{' imm0_255 '}'
2608ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2609parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2610  SMLoc S = Parser.getTok().getLoc();
2611
2612  // If this isn't a '{', this isn't a coprocessor immediate operand.
2613  if (Parser.getTok().isNot(AsmToken::LCurly))
2614    return MatchOperand_NoMatch;
2615  Parser.Lex(); // Eat the '{'
2616
2617  const MCExpr *Expr;
2618  SMLoc Loc = Parser.getTok().getLoc();
2619  if (getParser().ParseExpression(Expr)) {
2620    Error(Loc, "illegal expression");
2621    return MatchOperand_ParseFail;
2622  }
2623  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2624  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2625    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2626    return MatchOperand_ParseFail;
2627  }
2628  int Val = CE->getValue();
2629
2630  // Check for and consume the closing '}'
2631  if (Parser.getTok().isNot(AsmToken::RCurly))
2632    return MatchOperand_ParseFail;
2633  SMLoc E = Parser.getTok().getLoc();
2634  Parser.Lex(); // Eat the '}'
2635
2636  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2637  return MatchOperand_Success;
2638}
2639
2640// For register list parsing, we need to map from raw GPR register numbering
2641// to the enumeration values. The enumeration values aren't sorted by
2642// register number due to our using "sp", "lr" and "pc" as canonical names.
2643static unsigned getNextRegister(unsigned Reg) {
2644  // If this is a GPR, we need to do it manually, otherwise we can rely
2645  // on the sort ordering of the enumeration since the other reg-classes
2646  // are sane.
2647  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2648    return Reg + 1;
2649  switch(Reg) {
2650  default: assert(0 && "Invalid GPR number!");
2651  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2652  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2653  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2654  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2655  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2656  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2657  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2658  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2659  }
2660}
2661
2662// Return the low-subreg of a given Q register.
2663static unsigned getDRegFromQReg(unsigned QReg) {
2664  switch (QReg) {
2665  default: llvm_unreachable("expected a Q register!");
2666  case ARM::Q0:  return ARM::D0;
2667  case ARM::Q1:  return ARM::D2;
2668  case ARM::Q2:  return ARM::D4;
2669  case ARM::Q3:  return ARM::D6;
2670  case ARM::Q4:  return ARM::D8;
2671  case ARM::Q5:  return ARM::D10;
2672  case ARM::Q6:  return ARM::D12;
2673  case ARM::Q7:  return ARM::D14;
2674  case ARM::Q8:  return ARM::D16;
2675  case ARM::Q9:  return ARM::D18;
2676  case ARM::Q10: return ARM::D20;
2677  case ARM::Q11: return ARM::D22;
2678  case ARM::Q12: return ARM::D24;
2679  case ARM::Q13: return ARM::D26;
2680  case ARM::Q14: return ARM::D28;
2681  case ARM::Q15: return ARM::D30;
2682  }
2683}
2684
/// Parse a register list.
///
/// Syntax: '{' reg (',' reg | '-' reg)* '}' with an optional trailing '^'
/// for the LDM/STM system-instruction variants. The list may consist of
/// GPRs, DPRs or SPRs; all registers must come from the class of the first
/// one. Q registers are accepted and expanded to their two D sub-registers.
/// Returns true (after emitting a diagnostic) on error.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // Determine the register class of the list from the (possibly expanded)
  // first register; all later entries must belong to the same class.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Register range, e.g. "r0-r4": expand to the individual registers.
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // A duplicate register is only a warning ('as' compatibility); it is
    // simply not added to the list a second time.
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
2806
2807// Helper function to parse the lane index for vector lists.
2808ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2809parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2810  Index = 0; // Always return a defined index value.
2811  if (Parser.getTok().is(AsmToken::LBrac)) {
2812    Parser.Lex(); // Eat the '['.
2813    if (Parser.getTok().is(AsmToken::RBrac)) {
2814      // "Dn[]" is the 'all lanes' syntax.
2815      LaneKind = AllLanes;
2816      Parser.Lex(); // Eat the ']'.
2817      return MatchOperand_Success;
2818    }
2819    const MCExpr *LaneIndex;
2820    SMLoc Loc = Parser.getTok().getLoc();
2821    if (getParser().ParseExpression(LaneIndex)) {
2822      Error(Loc, "illegal expression");
2823      return MatchOperand_ParseFail;
2824    }
2825    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2826    if (!CE) {
2827      Error(Loc, "lane index must be empty or an integer");
2828      return MatchOperand_ParseFail;
2829    }
2830    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2831      Error(Parser.getTok().getLoc(), "']' expected");
2832      return MatchOperand_ParseFail;
2833    }
2834    Parser.Lex(); // Eat the ']'.
2835    int64_t Val = CE->getValue();
2836
2837    // FIXME: Make this range check context sensitive for .8, .16, .32.
2838    if (Val < 0 || Val > 7) {
2839      Error(Parser.getTok().getLoc(), "lane index out of range");
2840      return MatchOperand_ParseFail;
2841    }
2842    Index = Val;
2843    LaneKind = IndexedLane;
2844    return MatchOperand_Success;
2845  }
2846  LaneKind = NoLanes;
2847  return MatchOperand_Success;
2848}
2849
// parse a vector register list
//
// Accepts either a bare D or Q register (gas compatibility: treated as a
// one- or two-entry list respectively), or a curly-brace list of D/Q
// registers, optionally with ranges ("d0-d3") and with an optional lane
// suffix ("[]" for all lanes, "[n]" for one lane) on each register. Every
// register in the list must carry the same lane suffix. Q registers are
// expanded to their two D sub-registers. 'Spacing' tracks the list stride:
// 0 = not yet known, 1 = single-spaced (d0,d1,...), 2 = double-spaced
// (d0,d2,...).
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    SMLoc E = Parser.getTok().getLoc();
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // A bare Q register is a two-entry list of its D sub-registers.
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  int Spacing = 0;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }
  // The lane suffix of the first register fixes LaneKind for the whole list.
  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Register range, e.g. "d0-d3".
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      EndLoc = Parser.getTok().getLoc();

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc EndLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  // Build the operand; the kind of operand depends on the lane suffix the
  // list carried.
  switch (LaneKind) {
  case NoLanes:
    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            (Spacing == 2),
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
3080
3081/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3082ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3083parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3084  SMLoc S = Parser.getTok().getLoc();
3085  const AsmToken &Tok = Parser.getTok();
3086  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3087  StringRef OptStr = Tok.getString();
3088
3089  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3090    .Case("sy",    ARM_MB::SY)
3091    .Case("st",    ARM_MB::ST)
3092    .Case("sh",    ARM_MB::ISH)
3093    .Case("ish",   ARM_MB::ISH)
3094    .Case("shst",  ARM_MB::ISHST)
3095    .Case("ishst", ARM_MB::ISHST)
3096    .Case("nsh",   ARM_MB::NSH)
3097    .Case("un",    ARM_MB::NSH)
3098    .Case("nshst", ARM_MB::NSHST)
3099    .Case("unst",  ARM_MB::NSHST)
3100    .Case("osh",   ARM_MB::OSH)
3101    .Case("oshst", ARM_MB::OSHST)
3102    .Default(~0U);
3103
3104  if (Opt == ~0U)
3105    return MatchOperand_NoMatch;
3106
3107  Parser.Lex(); // Eat identifier token.
3108  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3109  return MatchOperand_Success;
3110}
3111
3112/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3113ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3114parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3115  SMLoc S = Parser.getTok().getLoc();
3116  const AsmToken &Tok = Parser.getTok();
3117  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3118  StringRef IFlagsStr = Tok.getString();
3119
3120  // An iflags string of "none" is interpreted to mean that none of the AIF
3121  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3122  unsigned IFlags = 0;
3123  if (IFlagsStr != "none") {
3124        for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3125      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3126        .Case("a", ARM_PROC::A)
3127        .Case("i", ARM_PROC::I)
3128        .Case("f", ARM_PROC::F)
3129        .Default(~0U);
3130
3131      // If some specific iflag is already set, it means that some letter is
3132      // present more than once, this is not acceptable.
3133      if (Flag == ~0U || (IFlags & Flag))
3134        return MatchOperand_NoMatch;
3135
3136      IFlags |= Flag;
3137    }
3138  }
3139
3140  Parser.Lex(); // Eat identifier token.
3141  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3142  return MatchOperand_Success;
3143}
3144
3145/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3146ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3147parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3148  SMLoc S = Parser.getTok().getLoc();
3149  const AsmToken &Tok = Parser.getTok();
3150  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3151  StringRef Mask = Tok.getString();
3152
3153  if (isMClass()) {
3154    // See ARMv6-M 10.1.1
3155    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3156      .Case("apsr", 0)
3157      .Case("iapsr", 1)
3158      .Case("eapsr", 2)
3159      .Case("xpsr", 3)
3160      .Case("ipsr", 5)
3161      .Case("epsr", 6)
3162      .Case("iepsr", 7)
3163      .Case("msp", 8)
3164      .Case("psp", 9)
3165      .Case("primask", 16)
3166      .Case("basepri", 17)
3167      .Case("basepri_max", 18)
3168      .Case("faultmask", 19)
3169      .Case("control", 20)
3170      .Default(~0U);
3171
3172    if (FlagsVal == ~0U)
3173      return MatchOperand_NoMatch;
3174
3175    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3176      // basepri, basepri_max and faultmask only valid for V7m.
3177      return MatchOperand_NoMatch;
3178
3179    Parser.Lex(); // Eat identifier token.
3180    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3181    return MatchOperand_Success;
3182  }
3183
3184  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3185  size_t Start = 0, Next = Mask.find('_');
3186  StringRef Flags = "";
3187  std::string SpecReg = Mask.slice(Start, Next).lower();
3188  if (Next != StringRef::npos)
3189    Flags = Mask.slice(Next+1, Mask.size());
3190
3191  // FlagsVal contains the complete mask:
3192  // 3-0: Mask
3193  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3194  unsigned FlagsVal = 0;
3195
3196  if (SpecReg == "apsr") {
3197    FlagsVal = StringSwitch<unsigned>(Flags)
3198    .Case("nzcvq",  0x8) // same as CPSR_f
3199    .Case("g",      0x4) // same as CPSR_s
3200    .Case("nzcvqg", 0xc) // same as CPSR_fs
3201    .Default(~0U);
3202
3203    if (FlagsVal == ~0U) {
3204      if (!Flags.empty())
3205        return MatchOperand_NoMatch;
3206      else
3207        FlagsVal = 8; // No flag
3208    }
3209  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3210    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3211      Flags = "fc";
3212    for (int i = 0, e = Flags.size(); i != e; ++i) {
3213      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3214      .Case("c", 1)
3215      .Case("x", 2)
3216      .Case("s", 4)
3217      .Case("f", 8)
3218      .Default(~0U);
3219
3220      // If some specific flag is already set, it means that some letter is
3221      // present more than once, this is not acceptable.
3222      if (FlagsVal == ~0U || (FlagsVal & Flag))
3223        return MatchOperand_NoMatch;
3224      FlagsVal |= Flag;
3225    }
3226  } else // No match for special register.
3227    return MatchOperand_NoMatch;
3228
3229  // Special register without flags is NOT equivalent to "fc" flags.
3230  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3231  // two lines would enable gas compatibility at the expense of breaking
3232  // round-tripping.
3233  //
3234  // if (!FlagsVal)
3235  //  FlagsVal = 0x9;
3236
3237  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3238  if (SpecReg == "spsr")
3239    FlagsVal |= 16;
3240
3241  Parser.Lex(); // Eat identifier token.
3242  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3243  return MatchOperand_Success;
3244}
3245
3246ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3247parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3248            int Low, int High) {
3249  const AsmToken &Tok = Parser.getTok();
3250  if (Tok.isNot(AsmToken::Identifier)) {
3251    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3252    return MatchOperand_ParseFail;
3253  }
3254  StringRef ShiftName = Tok.getString();
3255  std::string LowerOp = Op.lower();
3256  std::string UpperOp = Op.upper();
3257  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3258    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3259    return MatchOperand_ParseFail;
3260  }
3261  Parser.Lex(); // Eat shift type token.
3262
3263  // There must be a '#' and a shift amount.
3264  if (Parser.getTok().isNot(AsmToken::Hash) &&
3265      Parser.getTok().isNot(AsmToken::Dollar)) {
3266    Error(Parser.getTok().getLoc(), "'#' expected");
3267    return MatchOperand_ParseFail;
3268  }
3269  Parser.Lex(); // Eat hash token.
3270
3271  const MCExpr *ShiftAmount;
3272  SMLoc Loc = Parser.getTok().getLoc();
3273  if (getParser().ParseExpression(ShiftAmount)) {
3274    Error(Loc, "illegal expression");
3275    return MatchOperand_ParseFail;
3276  }
3277  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3278  if (!CE) {
3279    Error(Loc, "constant expression expected");
3280    return MatchOperand_ParseFail;
3281  }
3282  int Val = CE->getValue();
3283  if (Val < Low || Val > High) {
3284    Error(Loc, "immediate value out of range");
3285    return MatchOperand_ParseFail;
3286  }
3287
3288  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3289
3290  return MatchOperand_Success;
3291}
3292
3293ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3294parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3295  const AsmToken &Tok = Parser.getTok();
3296  SMLoc S = Tok.getLoc();
3297  if (Tok.isNot(AsmToken::Identifier)) {
3298    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3299    return MatchOperand_ParseFail;
3300  }
3301  int Val = StringSwitch<int>(Tok.getString())
3302    .Case("be", 1)
3303    .Case("le", 0)
3304    .Default(-1);
3305  Parser.Lex(); // Eat the token.
3306
3307  if (Val == -1) {
3308    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3309    return MatchOperand_ParseFail;
3310  }
3311  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3312                                                                  getContext()),
3313                                           S, Parser.getTok().getLoc()));
3314  return MatchOperand_Success;
3315}
3316
3317/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3318/// instructions. Legal values are:
3319///     lsl #n  'n' in [0,31]
3320///     asr #n  'n' in [1,32]
3321///             n == 32 encoded as n == 0.
3322ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3323parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3324  const AsmToken &Tok = Parser.getTok();
3325  SMLoc S = Tok.getLoc();
3326  if (Tok.isNot(AsmToken::Identifier)) {
3327    Error(S, "shift operator 'asr' or 'lsl' expected");
3328    return MatchOperand_ParseFail;
3329  }
3330  StringRef ShiftName = Tok.getString();
3331  bool isASR;
3332  if (ShiftName == "lsl" || ShiftName == "LSL")
3333    isASR = false;
3334  else if (ShiftName == "asr" || ShiftName == "ASR")
3335    isASR = true;
3336  else {
3337    Error(S, "shift operator 'asr' or 'lsl' expected");
3338    return MatchOperand_ParseFail;
3339  }
3340  Parser.Lex(); // Eat the operator.
3341
3342  // A '#' and a shift amount.
3343  if (Parser.getTok().isNot(AsmToken::Hash) &&
3344      Parser.getTok().isNot(AsmToken::Dollar)) {
3345    Error(Parser.getTok().getLoc(), "'#' expected");
3346    return MatchOperand_ParseFail;
3347  }
3348  Parser.Lex(); // Eat hash token.
3349
3350  const MCExpr *ShiftAmount;
3351  SMLoc E = Parser.getTok().getLoc();
3352  if (getParser().ParseExpression(ShiftAmount)) {
3353    Error(E, "malformed shift expression");
3354    return MatchOperand_ParseFail;
3355  }
3356  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3357  if (!CE) {
3358    Error(E, "shift amount must be an immediate");
3359    return MatchOperand_ParseFail;
3360  }
3361
3362  int64_t Val = CE->getValue();
3363  if (isASR) {
3364    // Shift amount must be in [1,32]
3365    if (Val < 1 || Val > 32) {
3366      Error(E, "'asr' shift amount must be in range [1,32]");
3367      return MatchOperand_ParseFail;
3368    }
3369    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3370    if (isThumb() && Val == 32) {
3371      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3372      return MatchOperand_ParseFail;
3373    }
3374    if (Val == 32) Val = 0;
3375  } else {
3376    // Shift amount must be in [1,32]
3377    if (Val < 0 || Val > 31) {
3378      Error(E, "'lsr' shift amount must be in range [0,31]");
3379      return MatchOperand_ParseFail;
3380    }
3381  }
3382
3383  E = Parser.getTok().getLoc();
3384  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3385
3386  return MatchOperand_Success;
3387}
3388
3389/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3390/// of instructions. Legal values are:
3391///     ror #n  'n' in {0, 8, 16, 24}
3392ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3393parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3394  const AsmToken &Tok = Parser.getTok();
3395  SMLoc S = Tok.getLoc();
3396  if (Tok.isNot(AsmToken::Identifier))
3397    return MatchOperand_NoMatch;
3398  StringRef ShiftName = Tok.getString();
3399  if (ShiftName != "ror" && ShiftName != "ROR")
3400    return MatchOperand_NoMatch;
3401  Parser.Lex(); // Eat the operator.
3402
3403  // A '#' and a rotate amount.
3404  if (Parser.getTok().isNot(AsmToken::Hash) &&
3405      Parser.getTok().isNot(AsmToken::Dollar)) {
3406    Error(Parser.getTok().getLoc(), "'#' expected");
3407    return MatchOperand_ParseFail;
3408  }
3409  Parser.Lex(); // Eat hash token.
3410
3411  const MCExpr *ShiftAmount;
3412  SMLoc E = Parser.getTok().getLoc();
3413  if (getParser().ParseExpression(ShiftAmount)) {
3414    Error(E, "malformed rotate expression");
3415    return MatchOperand_ParseFail;
3416  }
3417  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3418  if (!CE) {
3419    Error(E, "rotate amount must be an immediate");
3420    return MatchOperand_ParseFail;
3421  }
3422
3423  int64_t Val = CE->getValue();
3424  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
3425  // normally, zero is represented in asm by omitting the rotate operand
3426  // entirely.
3427  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3428    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3429    return MatchOperand_ParseFail;
3430  }
3431
3432  E = Parser.getTok().getLoc();
3433  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3434
3435  return MatchOperand_Success;
3436}
3437
/// parseBitfield - Parse the bitfield descriptor operand for BFC/BFI/SBFX/
/// UBFX-style instructions: "#lsb, #width" with lsb in [0,31] and width in
/// [1,32-lsb]. Both halves are combined into a single bitfield operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  if (getParser().ParseExpression(WidthExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));

  return MatchOperand_Success;
}
3505
3506ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3507parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3508  // Check for a post-index addressing register operand. Specifically:
3509  // postidx_reg := '+' register {, shift}
3510  //              | '-' register {, shift}
3511  //              | register {, shift}
3512
3513  // This method must return MatchOperand_NoMatch without consuming any tokens
3514  // in the case where there is no match, as other alternatives take other
3515  // parse methods.
3516  AsmToken Tok = Parser.getTok();
3517  SMLoc S = Tok.getLoc();
3518  bool haveEaten = false;
3519  bool isAdd = true;
3520  int Reg = -1;
3521  if (Tok.is(AsmToken::Plus)) {
3522    Parser.Lex(); // Eat the '+' token.
3523    haveEaten = true;
3524  } else if (Tok.is(AsmToken::Minus)) {
3525    Parser.Lex(); // Eat the '-' token.
3526    isAdd = false;
3527    haveEaten = true;
3528  }
3529  if (Parser.getTok().is(AsmToken::Identifier))
3530    Reg = tryParseRegister();
3531  if (Reg == -1) {
3532    if (!haveEaten)
3533      return MatchOperand_NoMatch;
3534    Error(Parser.getTok().getLoc(), "register expected");
3535    return MatchOperand_ParseFail;
3536  }
3537  SMLoc E = Parser.getTok().getLoc();
3538
3539  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3540  unsigned ShiftImm = 0;
3541  if (Parser.getTok().is(AsmToken::Comma)) {
3542    Parser.Lex(); // Eat the ','.
3543    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3544      return MatchOperand_ParseFail;
3545  }
3546
3547  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3548                                                  ShiftImm, S, E));
3549
3550  return MatchOperand_Success;
3551}
3552
3553ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3554parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3555  // Check for a post-index addressing register operand. Specifically:
3556  // am3offset := '+' register
3557  //              | '-' register
3558  //              | register
3559  //              | # imm
3560  //              | # + imm
3561  //              | # - imm
3562
3563  // This method must return MatchOperand_NoMatch without consuming any tokens
3564  // in the case where there is no match, as other alternatives take other
3565  // parse methods.
3566  AsmToken Tok = Parser.getTok();
3567  SMLoc S = Tok.getLoc();
3568
3569  // Do immediates first, as we always parse those if we have a '#'.
3570  if (Parser.getTok().is(AsmToken::Hash) ||
3571      Parser.getTok().is(AsmToken::Dollar)) {
3572    Parser.Lex(); // Eat the '#'.
3573    // Explicitly look for a '-', as we need to encode negative zero
3574    // differently.
3575    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3576    const MCExpr *Offset;
3577    if (getParser().ParseExpression(Offset))
3578      return MatchOperand_ParseFail;
3579    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3580    if (!CE) {
3581      Error(S, "constant expression expected");
3582      return MatchOperand_ParseFail;
3583    }
3584    SMLoc E = Tok.getLoc();
3585    // Negative zero is encoded as the flag value INT32_MIN.
3586    int32_t Val = CE->getValue();
3587    if (isNegative && Val == 0)
3588      Val = INT32_MIN;
3589
3590    Operands.push_back(
3591      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3592
3593    return MatchOperand_Success;
3594  }
3595
3596
3597  bool haveEaten = false;
3598  bool isAdd = true;
3599  int Reg = -1;
3600  if (Tok.is(AsmToken::Plus)) {
3601    Parser.Lex(); // Eat the '+' token.
3602    haveEaten = true;
3603  } else if (Tok.is(AsmToken::Minus)) {
3604    Parser.Lex(); // Eat the '-' token.
3605    isAdd = false;
3606    haveEaten = true;
3607  }
3608  if (Parser.getTok().is(AsmToken::Identifier))
3609    Reg = tryParseRegister();
3610  if (Reg == -1) {
3611    if (!haveEaten)
3612      return MatchOperand_NoMatch;
3613    Error(Parser.getTok().getLoc(), "register expected");
3614    return MatchOperand_ParseFail;
3615  }
3616  SMLoc E = Parser.getTok().getLoc();
3617
3618  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3619                                                  0, S, E));
3620
3621  return MatchOperand_Success;
3622}
3623
/// cvtT2LdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, Rt2, writeback, addr, pred.
bool ARMAsmParser::
cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  // NOTE(review): this uses CreateReg(0) while most sibling cvt* routines in
  // this file use CreateImm(0) for the same placeholder -- confirm which form
  // the matcher expects here.
  Inst.addOperand(MCOperand::CreateReg(0));
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3641
/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Rt, Rt2, addr, pred.
bool ARMAsmParser::
cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  // NOTE(review): CreateReg(0) here vs. CreateImm(0) in most sibling cvt*
  // routines -- confirm which form the matcher expects here.
  Inst.addOperand(MCOperand::CreateReg(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3659
/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback, addr, pred.
bool ARMAsmParser::
cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3675
/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Rt, addr, pred.
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3689
/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback, addr (3 ops), pred.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3705
/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback, addr, pred.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3721
3722
/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Rt, addr, pred.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3736
/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Rt, addr (3 ops), pred.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3750
/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Rt, addr (3 ops), pred.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3764
/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback, addr, offset (imm8), pred.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3783
/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback, addr, offset (reg), pred.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3802
/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Rt, addr, offset (imm8), pred.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3821
/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Rt, addr, offset (reg), pred.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3840
/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, Rt2, writeback, addr (3 ops), pred.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3858
/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Rt, Rt2, addr (3 ops), pred.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3876
/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback, addr (3 ops), pred.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3890
/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    // Returning false signals the failed conversion to the caller.
    return false;
  }
  // Rd
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // cc_out
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Rm is tied to Rd: repeat the destination register operand.
  Inst.addOperand(Inst.getOperand(0));
  // pred
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}
3923
/// cvtVLDwbFixed - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Vd (vector list), writeback, Vn (addr), pred.
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3937
/// cvtVLDwbRegister - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Vd, writeback, Vn (addr), Vm, pred.
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3953
/// cvtVSTwbFixed - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Vn (addr), Vt (vector list), pred.
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3967
/// cvtVSTwbRegister - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback, Vn (addr), Vm, Vt, pred.
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3983
3984/// Parse an ARM memory expression, return false if successful else return true
3985/// or an error.  The first token must be a '[' when called.
3986bool ARMAsmParser::
3987parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3988  SMLoc S, E;
3989  assert(Parser.getTok().is(AsmToken::LBrac) &&
3990         "Token is not a Left Bracket");
3991  S = Parser.getTok().getLoc();
3992  Parser.Lex(); // Eat left bracket token.
3993
3994  const AsmToken &BaseRegTok = Parser.getTok();
3995  int BaseRegNum = tryParseRegister();
3996  if (BaseRegNum == -1)
3997    return Error(BaseRegTok.getLoc(), "register expected");
3998
3999  // The next token must either be a comma or a closing bracket.
4000  const AsmToken &Tok = Parser.getTok();
4001  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4002    return Error(Tok.getLoc(), "malformed memory operand");
4003
4004  if (Tok.is(AsmToken::RBrac)) {
4005    E = Tok.getLoc();
4006    Parser.Lex(); // Eat right bracket token.
4007
4008    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4009                                             0, 0, false, S, E));
4010
4011    // If there's a pre-indexing writeback marker, '!', just add it as a token
4012    // operand. It's rather odd, but syntactically valid.
4013    if (Parser.getTok().is(AsmToken::Exclaim)) {
4014      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4015      Parser.Lex(); // Eat the '!'.
4016    }
4017
4018    return false;
4019  }
4020
4021  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4022  Parser.Lex(); // Eat the comma.
4023
4024  // If we have a ':', it's an alignment specifier.
4025  if (Parser.getTok().is(AsmToken::Colon)) {
4026    Parser.Lex(); // Eat the ':'.
4027    E = Parser.getTok().getLoc();
4028
4029    const MCExpr *Expr;
4030    if (getParser().ParseExpression(Expr))
4031     return true;
4032
4033    // The expression has to be a constant. Memory references with relocations
4034    // don't come through here, as they use the <label> forms of the relevant
4035    // instructions.
4036    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4037    if (!CE)
4038      return Error (E, "constant expression expected");
4039
4040    unsigned Align = 0;
4041    switch (CE->getValue()) {
4042    default:
4043      return Error(E,
4044                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4045    case 16:  Align = 2; break;
4046    case 32:  Align = 4; break;
4047    case 64:  Align = 8; break;
4048    case 128: Align = 16; break;
4049    case 256: Align = 32; break;
4050    }
4051
4052    // Now we should have the closing ']'
4053    E = Parser.getTok().getLoc();
4054    if (Parser.getTok().isNot(AsmToken::RBrac))
4055      return Error(E, "']' expected");
4056    Parser.Lex(); // Eat right bracket token.
4057
4058    // Don't worry about range checking the value here. That's handled by
4059    // the is*() predicates.
4060    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4061                                             ARM_AM::no_shift, 0, Align,
4062                                             false, S, E));
4063
4064    // If there's a pre-indexing writeback marker, '!', just add it as a token
4065    // operand.
4066    if (Parser.getTok().is(AsmToken::Exclaim)) {
4067      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4068      Parser.Lex(); // Eat the '!'.
4069    }
4070
4071    return false;
4072  }
4073
4074  // If we have a '#', it's an immediate offset, else assume it's a register
4075  // offset. Be friendly and also accept a plain integer (without a leading
4076  // hash) for gas compatibility.
4077  if (Parser.getTok().is(AsmToken::Hash) ||
4078      Parser.getTok().is(AsmToken::Dollar) ||
4079      Parser.getTok().is(AsmToken::Integer)) {
4080    if (Parser.getTok().isNot(AsmToken::Integer))
4081      Parser.Lex(); // Eat the '#'.
4082    E = Parser.getTok().getLoc();
4083
4084    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4085    const MCExpr *Offset;
4086    if (getParser().ParseExpression(Offset))
4087     return true;
4088
4089    // The expression has to be a constant. Memory references with relocations
4090    // don't come through here, as they use the <label> forms of the relevant
4091    // instructions.
4092    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4093    if (!CE)
4094      return Error (E, "constant expression expected");
4095
4096    // If the constant was #-0, represent it as INT32_MIN.
4097    int32_t Val = CE->getValue();
4098    if (isNegative && Val == 0)
4099      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4100
4101    // Now we should have the closing ']'
4102    E = Parser.getTok().getLoc();
4103    if (Parser.getTok().isNot(AsmToken::RBrac))
4104      return Error(E, "']' expected");
4105    Parser.Lex(); // Eat right bracket token.
4106
4107    // Don't worry about range checking the value here. That's handled by
4108    // the is*() predicates.
4109    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4110                                             ARM_AM::no_shift, 0, 0,
4111                                             false, S, E));
4112
4113    // If there's a pre-indexing writeback marker, '!', just add it as a token
4114    // operand.
4115    if (Parser.getTok().is(AsmToken::Exclaim)) {
4116      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4117      Parser.Lex(); // Eat the '!'.
4118    }
4119
4120    return false;
4121  }
4122
4123  // The register offset is optionally preceded by a '+' or '-'
4124  bool isNegative = false;
4125  if (Parser.getTok().is(AsmToken::Minus)) {
4126    isNegative = true;
4127    Parser.Lex(); // Eat the '-'.
4128  } else if (Parser.getTok().is(AsmToken::Plus)) {
4129    // Nothing to do.
4130    Parser.Lex(); // Eat the '+'.
4131  }
4132
4133  E = Parser.getTok().getLoc();
4134  int OffsetRegNum = tryParseRegister();
4135  if (OffsetRegNum == -1)
4136    return Error(E, "register expected");
4137
4138  // If there's a shift operator, handle it.
4139  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4140  unsigned ShiftImm = 0;
4141  if (Parser.getTok().is(AsmToken::Comma)) {
4142    Parser.Lex(); // Eat the ','.
4143    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4144      return true;
4145  }
4146
4147  // Now we should have the closing ']'
4148  E = Parser.getTok().getLoc();
4149  if (Parser.getTok().isNot(AsmToken::RBrac))
4150    return Error(E, "']' expected");
4151  Parser.Lex(); // Eat right bracket token.
4152
4153  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4154                                           ShiftType, ShiftImm, 0, isNegative,
4155                                           S, E));
4156
4157  // If there's a pre-indexing writeback marker, '!', just add it as a token
4158  // operand.
4159  if (Parser.getTok().is(AsmToken::Exclaim)) {
4160    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4161    Parser.Lex(); // Eat the '!'.
4162  }
4163
4164  return false;
4165}
4166
/// parseMemRegOffsetShift - Parse the shift portion of a register-offset
/// memory operand. One of these two forms:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// On success, sets \p St to the shift type and \p Amount to the shift
/// amount (0 for rrx) and returns false. Returns true on failure; a
/// diagnostic is emitted except when the current token is not even an
/// identifier.
bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
                                          unsigned &Amount) {
  SMLoc Loc = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  // The shift operator must be an identifier; bail without a diagnostic so
  // the caller can issue a more appropriate error.
  if (Tok.isNot(AsmToken::Identifier))
    return true;
  StringRef ShiftName = Tok.getString();
  // Accept all-lowercase and all-uppercase spellings; "asl" is an
  // accepted alias for lsl.
  if (ShiftName == "lsl" || ShiftName == "LSL" ||
      ShiftName == "asl" || ShiftName == "ASL")
    St = ARM_AM::lsl;
  else if (ShiftName == "lsr" || ShiftName == "LSR")
    St = ARM_AM::lsr;
  else if (ShiftName == "asr" || ShiftName == "ASR")
    St = ARM_AM::asr;
  else if (ShiftName == "ror" || ShiftName == "ROR")
    St = ARM_AM::ror;
  else if (ShiftName == "rrx" || ShiftName == "RRX")
    St = ARM_AM::rrx;
  else
    return Error(Loc, "illegal shift operator");
  Parser.Lex(); // Eat shift type token.

  // rrx stands alone; every other shift type takes an immediate amount.
  Amount = 0;
  if (St != ARM_AM::rrx) {
    Loc = Parser.getTok().getLoc();
    // A '#' and a shift amount. '$' is accepted as a synonym for '#'
    // (gas compatibility).
    const AsmToken &HashTok = Parser.getTok();
    if (HashTok.isNot(AsmToken::Hash) &&
        HashTok.isNot(AsmToken::Dollar))
      return Error(HashTok.getLoc(), "'#' expected");
    Parser.Lex(); // Eat hash token.

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;
    // Range check the immediate.
    // lsl, ror: 0 <= imm <= 31
    // lsr, asr: 0 <= imm <= 32
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error(Loc, "shift amount must be an immediate");
    int64_t Imm = CE->getValue();
    if (Imm < 0 ||
        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
      return Error(Loc, "immediate shift value out of range");
    Amount = Imm;
  }

  return false;
}
4223
/// parseFPImm - A floating point immediate expression operand.
/// Handles the '#<imm>' operand of the vmov.f32/vmov.f64 forms, accepting
/// either a real literal (encoded to the 8-bit VFP immediate form) or a
/// raw 0-255 pre-encoded integer value.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  // '$' is accepted as a synonym for '#' (gas compatibility).
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  // NOTE(review): Operands[2] is assumed to be the data-type suffix token
  // pushed during mnemonic parsing — confirm against ParseInstruction.
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm returns -1 if the value is not representable as an
    // 8-bit VFP immediate.
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      TokError("floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  // An integer here is treated as an already-encoded 8-bit immediate.
  // Note: a leading '-' before an integer is effectively ignored here.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
/// Parse an ARM instruction operand.  For now this parses the operand
/// regardless of the mnemonic. Returns true on failure (with a diagnostic
/// already emitted); on success the parsed operand(s) have been appended
/// to \p Operands.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // Try registers (with optional writeback '!') and shifted registers
    // first; both return 0/false on success.
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    // If this is VMRS, check for the apsr_nzcv operand.
    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
      return false;
    }

    // Fall through (deliberately, no break) for the Identifier case that
    // is not a register or a special name: parse it as an expression.
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar: // '$' accepted as '#' for gas compatibility.
  case AsmToken::Hash: {
    // #42 -> immediate.
    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
    S = Parser.getTok().getLoc();
    Parser.Lex();
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (CE) {
      int32_t Val = CE->getValue();
      // Represent #-0 as INT32_MIN so it is distinguishable from #0.
      if (isNegative && Val == 0)
        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
    }
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
    return false;
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return true;

    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                                   getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}
4383
4384// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4385//  :lower16: and :upper16:.
4386bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4387  RefKind = ARMMCExpr::VK_ARM_None;
4388
4389  // :lower16: and :upper16: modifiers
4390  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4391  Parser.Lex(); // Eat ':'
4392
4393  if (getLexer().isNot(AsmToken::Identifier)) {
4394    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4395    return true;
4396  }
4397
4398  StringRef IDVal = Parser.getTok().getIdentifier();
4399  if (IDVal == "lower16") {
4400    RefKind = ARMMCExpr::VK_ARM_LO16;
4401  } else if (IDVal == "upper16") {
4402    RefKind = ARMMCExpr::VK_ARM_HI16;
4403  } else {
4404    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4405    return true;
4406  }
4407  Parser.Lex();
4408
4409  if (getLexer().isNot(AsmToken::Colon)) {
4410    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4411    return true;
4412  }
4413  Parser.Lex(); // Eat the last ':'
4414  return false;
4415}
4416
/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// On return: PredicationCode holds the ARMCC condition (ARMCC::AL when
/// none was present), CarrySetting is true when a trailing 's' was
/// stripped, ProcessorIMod holds the ARM_PROC imod code glued onto a "cps"
/// mnemonic (0 if none), and ITMask holds the raw 't'/'e' condition-mask
/// characters for an "it" instruction.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // NOTE: for mnemonics shorter than two characters, StringRef::substr
  // clamps the out-of-range start and yields an empty suffix, which safely
  // falls through to the Default case below.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}
4515
4516/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4517/// inclusion of carry set or predication code operands.
4518//
4519// FIXME: It would be nice to autogen this.
4520void ARMAsmParser::
4521getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4522                      bool &CanAcceptPredicationCode) {
4523  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4524      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4525      Mnemonic == "add" || Mnemonic == "adc" ||
4526      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4527      Mnemonic == "orr" || Mnemonic == "mvn" ||
4528      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4529      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4530      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4531                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4532                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4533    CanAcceptCarrySet = true;
4534  } else
4535    CanAcceptCarrySet = false;
4536
4537  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4538      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4539      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4540      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4541      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4542      (Mnemonic == "clrex" && !isThumb()) ||
4543      (Mnemonic == "nop" && isThumbOne()) ||
4544      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4545        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4546        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4547      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4548       !isThumb()) ||
4549      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4550    CanAcceptPredicationCode = false;
4551  } else
4552    CanAcceptPredicationCode = true;
4553
4554  if (isThumb()) {
4555    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4556        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4557      CanAcceptPredicationCode = false;
4558  }
4559}
4560
/// Determine whether the defaulted (non-setting) cc_out operand should be
/// omitted from the parsed operand list so the matcher can select an
/// encoding that has no cc_out operand at all. Returns true when the
/// cc_out operand should be removed.
///
/// Operand layout here: Operands[0] is the mnemonic token, Operands[1] is
/// the cc_out register operand (0 when not setting flags); Operands[3]
/// onward are the explicit operands as written (presumably Operands[2] is
/// the condition-code operand — confirm against ParseInstruction).
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs  or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // No special case applied; keep the cc_out operand.
  return false;
}
4685
4686static bool isDataTypeToken(StringRef Tok) {
4687  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4688    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4689    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4690    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4691    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4692    Tok == ".f" || Tok == ".d";
4693}
4694
4695// FIXME: This bit should probably be handled via an explicit match class
4696// in the .td files that matches the suffix instead of having it be
4697// a literal string token the way it is now.
4698static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4699  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4700}
4701
4702static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4703/// Parse an arm instruction mnemonic followed by its operands.
4704bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4705                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4706  // Apply mnemonic aliases before doing anything else, as the destination
4707  // mnemnonic may include suffices and we want to handle them normally.
4708  // The generic tblgen'erated code does this later, at the start of
4709  // MatchInstructionImpl(), but that's too late for aliases that include
4710  // any sort of suffix.
4711  unsigned AvailableFeatures = getAvailableFeatures();
4712  applyMnemonicAliases(Name, AvailableFeatures);
4713
4714  // First check for the ARM-specific .req directive.
4715  if (Parser.getTok().is(AsmToken::Identifier) &&
4716      Parser.getTok().getIdentifier() == ".req") {
4717    parseDirectiveReq(Name, NameLoc);
4718    // We always return 'error' for this, as we're done with this
4719    // statement and don't need to match the 'instruction."
4720    return true;
4721  }
4722
4723  // Create the leading tokens for the mnemonic, split by '.' characters.
4724  size_t Start = 0, Next = Name.find('.');
4725  StringRef Mnemonic = Name.slice(Start, Next);
4726
4727  // Split out the predication code and carry setting flag from the mnemonic.
4728  unsigned PredicationCode;
4729  unsigned ProcessorIMod;
4730  bool CarrySetting;
4731  StringRef ITMask;
4732  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4733                           ProcessorIMod, ITMask);
4734
4735  // In Thumb1, only the branch (B) instruction can be predicated.
4736  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4737    Parser.EatToEndOfStatement();
4738    return Error(NameLoc, "conditional execution not supported in Thumb1");
4739  }
4740
4741  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4742
4743  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4744  // is the mask as it will be for the IT encoding if the conditional
4745  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
4746  // where the conditional bit0 is zero, the instruction post-processing
4747  // will adjust the mask accordingly.
4748  if (Mnemonic == "it") {
4749    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4750    if (ITMask.size() > 3) {
4751      Parser.EatToEndOfStatement();
4752      return Error(Loc, "too many conditions on IT instruction");
4753    }
4754    unsigned Mask = 8;
4755    for (unsigned i = ITMask.size(); i != 0; --i) {
4756      char pos = ITMask[i - 1];
4757      if (pos != 't' && pos != 'e') {
4758        Parser.EatToEndOfStatement();
4759        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4760      }
4761      Mask >>= 1;
4762      if (ITMask[i - 1] == 't')
4763        Mask |= 8;
4764    }
4765    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4766  }
4767
4768  // FIXME: This is all a pretty gross hack. We should automatically handle
4769  // optional operands like this via tblgen.
4770
4771  // Next, add the CCOut and ConditionCode operands, if needed.
4772  //
4773  // For mnemonics which can ever incorporate a carry setting bit or predication
4774  // code, our matching model involves us always generating CCOut and
4775  // ConditionCode operands to match the mnemonic "as written" and then we let
4776  // the matcher deal with finding the right instruction or generating an
4777  // appropriate error.
4778  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4779  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4780
4781  // If we had a carry-set on an instruction that can't do that, issue an
4782  // error.
4783  if (!CanAcceptCarrySet && CarrySetting) {
4784    Parser.EatToEndOfStatement();
4785    return Error(NameLoc, "instruction '" + Mnemonic +
4786                 "' can not set flags, but 's' suffix specified");
4787  }
4788  // If we had a predication code on an instruction that can't do that, issue an
4789  // error.
4790  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4791    Parser.EatToEndOfStatement();
4792    return Error(NameLoc, "instruction '" + Mnemonic +
4793                 "' is not predicable, but condition code specified");
4794  }
4795
4796  // Add the carry setting operand, if necessary.
4797  if (CanAcceptCarrySet) {
4798    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4799    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4800                                               Loc));
4801  }
4802
4803  // Add the predication code operand, if necessary.
4804  if (CanAcceptPredicationCode) {
4805    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4806                                      CarrySetting);
4807    Operands.push_back(ARMOperand::CreateCondCode(
4808                         ARMCC::CondCodes(PredicationCode), Loc));
4809  }
4810
4811  // Add the processor imod operand, if necessary.
4812  if (ProcessorIMod) {
4813    Operands.push_back(ARMOperand::CreateImm(
4814          MCConstantExpr::Create(ProcessorIMod, getContext()),
4815                                 NameLoc, NameLoc));
4816  }
4817
4818  // Add the remaining tokens in the mnemonic.
4819  while (Next != StringRef::npos) {
4820    Start = Next;
4821    Next = Name.find('.', Start + 1);
4822    StringRef ExtraToken = Name.slice(Start, Next);
4823
4824    // Some NEON instructions have an optional datatype suffix that is
4825    // completely ignored. Check for that.
4826    if (isDataTypeToken(ExtraToken) &&
4827        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4828      continue;
4829
4830    if (ExtraToken != ".n") {
4831      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4832      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4833    }
4834  }
4835
4836  // Read the remaining operands.
4837  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4838    // Read the first operand.
4839    if (parseOperand(Operands, Mnemonic)) {
4840      Parser.EatToEndOfStatement();
4841      return true;
4842    }
4843
4844    while (getLexer().is(AsmToken::Comma)) {
4845      Parser.Lex();  // Eat the comma.
4846
4847      // Parse and remember the operand.
4848      if (parseOperand(Operands, Mnemonic)) {
4849        Parser.EatToEndOfStatement();
4850        return true;
4851      }
4852    }
4853  }
4854
4855  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4856    SMLoc Loc = getLexer().getLoc();
4857    Parser.EatToEndOfStatement();
4858    return Error(Loc, "unexpected token in argument list");
4859  }
4860
4861  Parser.Lex(); // Consume the EndOfStatement
4862
4863  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4864  // do and don't have a cc_out optional-def operand. With some spot-checks
4865  // of the operand list, we can figure out which variant we're trying to
4866  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
4868  // mnemonic, of course (CarrySetting == true). Reason number #317 the
4869  // table driven matcher doesn't fit well with the ARM instruction set.
4870  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4871    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4872    Operands.erase(Operands.begin() + 1);
4873    delete Op;
4874  }
4875
4876  // ARM mode 'blx' need special handling, as the register operand version
4877  // is predicable, but the label operand version is not. So, we can't rely
4878  // on the Mnemonic based checking to correctly figure out when to put
4879  // a k_CondCode operand in the list. If we're trying to match the label
4880  // version, remove the k_CondCode operand here.
4881  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4882      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4883    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4884    Operands.erase(Operands.begin() + 1);
4885    delete Op;
4886  }
4887
4888  // The vector-compare-to-zero instructions have a literal token "#0" at
4889  // the end that comes to here as an immediate operand. Convert it to a
4890  // token to play nicely with the matcher.
4891  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4892      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4893      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4894    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4895    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4896    if (CE && CE->getValue() == 0) {
4897      Operands.erase(Operands.begin() + 5);
4898      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4899      delete Op;
4900    }
4901  }
4902  // VCMP{E} does the same thing, but with a different operand count.
4903  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4904      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4905    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4906    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4907    if (CE && CE->getValue() == 0) {
4908      Operands.erase(Operands.begin() + 4);
4909      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4910      delete Op;
4911    }
4912  }
4913  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4914  // end. Convert it to a token here. Take care not to convert those
4915  // that should hit the Thumb2 encoding.
4916  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4917      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4918      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4919      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4920    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4921    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4922    if (CE && CE->getValue() == 0 &&
4923        (isThumbOne() ||
4924         // The cc_out operand matches the IT block.
4925         ((inITBlock() != CarrySetting) &&
4926         // Neither register operand is a high register.
4927         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4928          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4929      Operands.erase(Operands.begin() + 5);
4930      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4931      delete Op;
4932    }
4933  }
4934
4935  return false;
4936}
4937
4938// Validate context-sensitive operand constraints.
4939
4940// return 'true' if register list contains non-low GPR registers,
4941// 'false' otherwise. If Reg is in the register list or is HiReg, set
4942// 'containsReg' to true.
4943static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4944                                 unsigned HiReg, bool &containsReg) {
4945  containsReg = false;
4946  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4947    unsigned OpReg = Inst.getOperand(i).getReg();
4948    if (OpReg == Reg)
4949      containsReg = true;
4950    // Anything other than a low register isn't legal here.
4951    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4952      return true;
4953  }
4954  return false;
4955}
4956
4957// Check if the specified regisgter is in the register list of the inst,
4958// starting at the indicated operand number.
4959static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4960  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4961    unsigned OpReg = Inst.getOperand(i).getReg();
4962    if (OpReg == Reg)
4963      return true;
4964  }
4965  return false;
4966}
4967
// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
// Per-opcode instruction descriptions, defined elsewhere (presumably the
// tablegen'erated opcode table -- see the FIXME above about reaching into
// it directly instead of going through MCInstrInfo).
extern const MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for the given opcode.  No bounds checking is
// performed; Opcode must be a valid index into ARMInsts.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}
4977
// FIXME: We would really like to be able to tablegen'erate this.
//
// Check context-sensitive operand constraints that the table-driven matcher
// cannot express: IT-block predication rules, paired-register (Rt/Rt2)
// sequencing, bitfield width ranges, and Thumb1 register-list/writeback
// restrictions.  Returns true (after emitting a diagnostic via Error) if
// Inst violates a constraint, false if the instruction is acceptable.
// Operands is the parsed operand list, used only for SMLoc diagnostics.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable.  It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    // The first instruction of an IT block always uses the base condition
    // ('t'); subsequent slots read their then/else bit out of the IT mask.
    // Note this advances the FirstCond state even if the checks below fail.
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    // The instruction's condition code must match the IT block's condition
    // (for a 't' slot) or its opposite (for an 'e' slot).
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  // Branches (tB/t2B) are exempt: they may be predicated without IT.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  // Per-opcode operand constraints.
  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1.  The source registers start at operand 1 here
    // (operand 0 is the writeback/status destination).
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}
5127
// Map a VST*LN pseudo opcode (the "_Asm_" aliases used only for assembly
// matching) to the real instruction opcode to emit, collapsing the
// per-datatype suffix variants (8/P8/I8/S8/U8, etc.) onto one opcode.
// 'Spacing' is set to the register-number increment between consecutive
// vector registers in the operand list: 1 for the D-register forms, 2 for
// the Q-register forms (the caller adds 'Spacing' to the first register to
// form the list; see processInstruction()).
static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
  case ARM::VST1LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
  case ARM::VST1LNdAsm_U8:
    Spacing = 1;
    return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16:
    Spacing = 1;
    return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
    Spacing = 1;
    return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
  case ARM::VST2LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
  case ARM::VST2LNqWB_fixed_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
  case ARM::VST2LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
  case ARM::VST2LNqWB_register_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
  case ARM::VST2LNdAsm_U8:
    Spacing = 1;
    return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16:
    Spacing = 1;
    return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
    Spacing = 1;
    return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
  case ARM::VST2LNqAsm_U16:
    Spacing = 2;
    return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
    Spacing = 2;
    return ARM::VST2LNq32;
  }
}
5258
5259static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
5260  switch(Opc) {
5261  default: assert(0 && "unexpected opcode!");
5262  // VLD1LN
5263  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5264  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5265  case ARM::VLD1LNdWB_fixed_Asm_U8:
5266    Spacing = 1;
5267    return ARM::VLD1LNd8_UPD;
5268  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5269  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5270  case ARM::VLD1LNdWB_fixed_Asm_U16:
5271    Spacing = 1;
5272    return ARM::VLD1LNd16_UPD;
5273  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5274  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5275  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5276    Spacing = 1;
5277    return ARM::VLD1LNd32_UPD;
5278  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5279  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5280  case ARM::VLD1LNdWB_register_Asm_U8:
5281    Spacing = 1;
5282    return ARM::VLD1LNd8_UPD;
5283  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5284  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5285  case ARM::VLD1LNdWB_register_Asm_U16:
5286    Spacing = 1;
5287    return ARM::VLD1LNd16_UPD;
5288  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5289  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5290  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5291    Spacing = 1;
5292    return ARM::VLD1LNd32_UPD;
5293  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5294  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5295  case ARM::VLD1LNdAsm_U8:
5296    Spacing = 1;
5297    return ARM::VLD1LNd8;
5298  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5299  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5300  case ARM::VLD1LNdAsm_U16:
5301    Spacing = 1;
5302    return ARM::VLD1LNd16;
5303  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5304  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5305  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5306    Spacing = 1;
5307    return ARM::VLD1LNd32;
5308
5309  // VLD2LN
5310  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5311  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5312  case ARM::VLD2LNdWB_fixed_Asm_U8:
5313    Spacing = 1;
5314    return ARM::VLD2LNd8_UPD;
5315  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5316  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5317  case ARM::VLD2LNdWB_fixed_Asm_U16:
5318    Spacing = 1;
5319    return ARM::VLD2LNd16_UPD;
5320  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5321  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5322  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5323    Spacing = 1;
5324    return ARM::VLD2LNd32_UPD;
5325  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5326  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5327  case ARM::VLD2LNqWB_fixed_Asm_U16:
5328    Spacing = 1;
5329    return ARM::VLD2LNq16_UPD;
5330  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
5331  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
5332  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
5333    Spacing = 2;
5334    return ARM::VLD2LNq32_UPD;
5335  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5336  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5337  case ARM::VLD2LNdWB_register_Asm_U8:
5338    Spacing = 1;
5339    return ARM::VLD2LNd8_UPD;
5340  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5341  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5342  case ARM::VLD2LNdWB_register_Asm_U16:
5343    Spacing = 1;
5344    return ARM::VLD2LNd16_UPD;
5345  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5346  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5347  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5348    Spacing = 1;
5349    return ARM::VLD2LNd32_UPD;
5350  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5351  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5352  case ARM::VLD2LNqWB_register_Asm_U16:
5353    Spacing = 2;
5354    return ARM::VLD2LNq16_UPD;
5355  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
5356  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
5357  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
5358    Spacing = 2;
5359    return ARM::VLD2LNq32_UPD;
5360  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5361  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5362  case ARM::VLD2LNdAsm_U8:
5363    Spacing = 1;
5364    return ARM::VLD2LNd8;
5365  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5366  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5367  case ARM::VLD2LNdAsm_U16:
5368    Spacing = 1;
5369    return ARM::VLD2LNd16;
5370  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5371  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5372  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5373    Spacing = 1;
5374    return ARM::VLD2LNd32;
5375  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5376  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
5377  case ARM::VLD2LNqAsm_U16:
5378    Spacing = 2;
5379    return ARM::VLD2LNq16;
5380  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
5381  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
5382  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
5383    Spacing = 2;
5384    return ARM::VLD2LNq32;
5385  }
5386}
5387
5388bool ARMAsmParser::
5389processInstruction(MCInst &Inst,
5390                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5391  switch (Inst.getOpcode()) {
5392  // Handle NEON VST complex aliases.
5393  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5394  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5395  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5396  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5397  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5398  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5399  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5400  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5401    MCInst TmpInst;
5402    // Shuffle the operands around so the lane index operand is in the
5403    // right place.
5404    unsigned Spacing;
5405    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5406    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5407    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5408    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5409    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5410    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5411    TmpInst.addOperand(Inst.getOperand(1)); // lane
5412    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5413    TmpInst.addOperand(Inst.getOperand(6));
5414    Inst = TmpInst;
5415    return true;
5416  }
5417
  // NEON VSTn single-lane "Asm" pseudo-opcodes. The parser collects operands
  // in assembly order (Vd, lane, Rn/alignment, [Rm], predicate); here they
  // are reordered into the real instruction's operand order. The real opcode
  // is chosen by getRealVSTLNOpcode(), which also sets Spacing, the register
  // increment used to form the second register of the store list.
  // Register-writeback form (explicit Rm operand present).
  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
  case ARM::VST2LNqWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }
  // Fixed-increment writeback form: the Rm slot is filled with register 0.
  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
  case ARM::VST2LNqWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // No-writeback form: no Rn_wb and no Rm operand.
  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
  case ARM::VST1LNdAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second list reg
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // Handle NEON VLD complex aliases.
  // Same shape as the VST cases above, with two differences: the loaded
  // register(s) are destinations (added first), and the real instruction
  // carries tied source operands equal to the destination registers so the
  // unwritten lanes are preserved.
  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
  case ARM::VLD2LNqWB_register_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(4)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
    TmpInst.addOperand(Inst.getOperand(6));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
  case ARM::VLD2LNqWB_fixed_Asm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
  case ARM::VLD1LNdAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }

  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
  case ARM::VLD2LNqAsm_U32: {
    MCInst TmpInst;
    // Shuffle the operands around so the lane index operand is in the
    // right place.
    unsigned Spacing;
    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
    TmpInst.addOperand(Inst.getOperand(0)); // Vd
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // second dest reg
    TmpInst.addOperand(Inst.getOperand(2)); // Rn
    TmpInst.addOperand(Inst.getOperand(3)); // alignment
    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
                                            Spacing)); // tied second src
    TmpInst.addOperand(Inst.getOperand(1)); // lane
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    Inst = TmpInst;
    return true;
  }
  // Handle the Thumb2 mode MOV complex aliases.
  case ARM::t2MOVsr:
  case ARM::t2MOVSsr: {
    // Which instruction to expand to depends on the CCOut operand and
    // whether we're in an IT block if the register operands are low
    // registers.
    // Narrow (16-bit) shift-by-register encodings require Rd == Rn, all-low
    // registers, and the flag-setting/IT relationship checked below.
    bool isNarrow = false;
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        isARMLowRegister(Inst.getOperand(2).getReg()) &&
        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
      isNarrow = true;
    MCInst TmpInst;
    unsigned newOpc;
    // The shift type encoded in operand 3 selects the real shift opcode.
    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
    }
    TmpInst.setOpcode(newOpc);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    // The narrow forms take cc_out immediately after Rd; the wide forms
    // take it last (see the trailing addOperand below).
    if (isNarrow)
      TmpInst.addOperand(MCOperand::CreateReg(
          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // Rm
    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
    TmpInst.addOperand(Inst.getOperand(5));
    if (!isNarrow)
      TmpInst.addOperand(MCOperand::CreateReg(
          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
    Inst = TmpInst;
    return true;
  }
5766  case ARM::t2MOVsi:
5767  case ARM::t2MOVSsi: {
5768    // Which instruction to expand to depends on the CCOut operand and
5769    // whether we're in an IT block if the register operands are low
5770    // registers.
5771    bool isNarrow = false;
5772    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5773        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5774        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5775      isNarrow = true;
5776    MCInst TmpInst;
5777    unsigned newOpc;
5778    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5779    default: llvm_unreachable("unexpected opcode!");
5780    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5781    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5782    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5783    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5784    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
5785    }
5786    unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5787    if (Ammount == 32) Ammount = 0;
5788    TmpInst.setOpcode(newOpc);
5789    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5790    if (isNarrow)
5791      TmpInst.addOperand(MCOperand::CreateReg(
5792          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5793    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5794    if (newOpc != ARM::t2RRX)
5795      TmpInst.addOperand(MCOperand::CreateImm(Ammount));
5796    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5797    TmpInst.addOperand(Inst.getOperand(4));
5798    if (!isNarrow)
5799      TmpInst.addOperand(MCOperand::CreateReg(
5800          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5801    Inst = TmpInst;
5802    return true;
5803  }
  // Handle the ARM mode MOV complex aliases.
  // Register-shift forms (asr/lsr/lsl/ror Rd, Rn, Rm) expand to MOVsr with
  // the shift type packed into a so_reg operand via getSORegOpc().
  case ARM::ASRr:
  case ARM::LSRr:
  case ARM::LSLr:
  case ARM::RORr: {
    ARM_AM::ShiftOpc ShiftTy;
    switch(Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
    }
    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsr);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(Inst.getOperand(2)); // Rm
    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    return true;
  }
  // Immediate-shift forms (asr/lsr/lsl/ror Rd, Rn, #imm).
  case ARM::ASRi:
  case ARM::LSRi:
  case ARM::LSLi:
  case ARM::RORi: {
    ARM_AM::ShiftOpc ShiftTy;
    switch(Inst.getOpcode()) {
    default: llvm_unreachable("unexpected opcode!");
    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
    }
    // A shift by zero is a plain MOVr, not a MOVsi.
    unsigned Amt = Inst.getOperand(2).getImm();
    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
    MCInst TmpInst;
    TmpInst.setOpcode(Opc);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    if (Opc == ARM::MOVsi)
      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
    Inst = TmpInst;
    return true;
  }
  // rrx Rd, Rn expands to MOVsi with an rrx so_reg operand.
  case ARM::RRXi: {
    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::MOVsi);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
    Inst = TmpInst;
    return true;
  }
  // Single-register LDM/STM with writeback are rewritten as post-indexed
  // loads / pre-indexed stores, per the ARM ARM. A 5-operand MCInst here
  // means the register list holds exactly one register (operand 4).
  case ARM::t2LDMIA_UPD: {
    // If this is a load of a single register, then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2LDR_POST);
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(4)); // post-increment by 4
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::t2STMDB_UPD: {
    // If this is a store of a single register, then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    if (Inst.getNumOperands() != 5)
      return false;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::t2STR_PRE);
    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
    TmpInst.addOperand(Inst.getOperand(4)); // Rt
    TmpInst.addOperand(Inst.getOperand(1)); // Rn
    TmpInst.addOperand(MCOperand::CreateImm(-4)); // pre-decrement by 4
    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
    TmpInst.addOperand(Inst.getOperand(3));
    Inst = TmpInst;
    return true;
  }
  case ARM::LDMIA_UPD:
    // If this is a load of a single register via a 'pop', then we should use
    // a post-indexed LDR instruction instead, per the ARM ARM.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::LDR_POST_IMM);
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(1)); // Rn
      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
      TmpInst.addOperand(MCOperand::CreateImm(4)); // post-increment by 4
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
      return true;
    }
    break;
  case ARM::STMDB_UPD:
    // If this is a store of a single register via a 'push', then we should use
    // a pre-indexed STR instruction instead, per the ARM ARM.
    // NOTE(review): unlike the three sibling cases above, this path falls
    // through to 'break' (returning false) even after rewriting Inst --
    // confirm the caller does not rely on the 'true' (modified) signal here.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4)); // pre-decrement by 4
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  // Prefer shorter/standard encodings for Thumb add/sub immediates when the
  // generic mnemonic was used and the immediate fits.
  case ARM::t2ADDri12:
    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
    // mnemonic was used (not "addw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2ADDri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::t2SUBri12:
    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
    // mnemonic was used (not "subw"), encoding T3 is preferred.
    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
      break;
    Inst.setOpcode(ARM::t2SUBri);
    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tADDi3);
      return true;
    }
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
      Inst.setOpcode(ARM::tSUBi3);
      return true;
    }
    break;
  case ARM::t2ADDrr: {
    // If the destination and first source operand are the same, and
    // there's no setting of the flags, use encoding T2 instead of T3.
    // Note that this is only for ADD, not SUB. This mirrors the system
    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
        Inst.getOperand(5).getReg() != 0 ||
        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
      break;
    MCInst TmpInst;
    TmpInst.setOpcode(ARM::tADDhirr);
    TmpInst.addOperand(Inst.getOperand(0)); // Rd
    TmpInst.addOperand(Inst.getOperand(0)); // Rn (tied, == Rd)
    TmpInst.addOperand(Inst.getOperand(2)); // Rm
    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
    TmpInst.addOperand(Inst.getOperand(4));
    Inst = TmpInst;
    return true;
  }
  // Select conditional vs. unconditional branch forms based on the
  // predicate and IT-block state.
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
      Inst.setOpcode(ARM::tBcc);
      return true;
    }
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
      Inst.setOpcode(ARM::t2Bcc);
      return true;
    }
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
      Inst.setOpcode(ARM::t2B);
      return true;
    }
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
      Inst.setOpcode(ARM::tB);
      return true;
    }
    break;
  // Widen 16-bit LDM/STM/POP/PUSH to the 32-bit Thumb2 forms when the
  // register list or writeback cannot be expressed in the narrow encoding.
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
      return true;
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
      return true;
    }
    break;
  }
  case ARM::tPOP: {
    bool listContainsBase;
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2LDMIA_UPD);
    // Add the base register and writeback operands (pop == ldmia sp!).
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
      return false;
    assert (isThumbTwo());
    Inst.setOpcode(ARM::t2STMDB_UPD);
    // Add the base register and writeback operands (push == stmdb sp!).
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
    return true;
  }
6087  case ARM::t2MOVi: {
6088    // If we can use the 16-bit encoding and the user didn't explicitly
6089    // request the 32-bit variant, transform it here.
6090    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6091        Inst.getOperand(1).getImm() <= 255 &&
6092        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6093         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6094        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6095        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6096         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6097      // The operands aren't in the same order for tMOVi8...
6098      MCInst TmpInst;
6099      TmpInst.setOpcode(ARM::tMOVi8);
6100      TmpInst.addOperand(Inst.getOperand(0));
6101      TmpInst.addOperand(Inst.getOperand(4));
6102      TmpInst.addOperand(Inst.getOperand(1));
6103      TmpInst.addOperand(Inst.getOperand(2));
6104      TmpInst.addOperand(Inst.getOperand(3));
6105      Inst = TmpInst;
6106      return true;
6107    }
6108    break;
6109  }
6110  case ARM::t2MOVr: {
6111    // If we can use the 16-bit encoding and the user didn't explicitly
6112    // request the 32-bit variant, transform it here.
6113    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6114        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6115        Inst.getOperand(2).getImm() == ARMCC::AL &&
6116        Inst.getOperand(4).getReg() == ARM::CPSR &&
6117        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6118         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6119      // The operands aren't the same for tMOV[S]r... (no cc_out)
6120      MCInst TmpInst;
6121      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6122      TmpInst.addOperand(Inst.getOperand(0));
6123      TmpInst.addOperand(Inst.getOperand(1));
6124      TmpInst.addOperand(Inst.getOperand(2));
6125      TmpInst.addOperand(Inst.getOperand(3));
6126      Inst = TmpInst;
6127      return true;
6128    }
6129    break;
6130  }
6131  case ARM::t2SXTH:
6132  case ARM::t2SXTB:
6133  case ARM::t2UXTH:
6134  case ARM::t2UXTB: {
6135    // If we can use the 16-bit encoding and the user didn't explicitly
6136    // request the 32-bit variant, transform it here.
6137    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6138        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6139        Inst.getOperand(2).getImm() == 0 &&
6140        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6141         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6142      unsigned NewOpc;
6143      switch (Inst.getOpcode()) {
6144      default: llvm_unreachable("Illegal opcode!");
6145      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6146      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6147      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6148      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6149      }
6150      // The operands aren't the same for thumb1 (no rotate operand).
6151      MCInst TmpInst;
6152      TmpInst.setOpcode(NewOpc);
6153      TmpInst.addOperand(Inst.getOperand(0));
6154      TmpInst.addOperand(Inst.getOperand(1));
6155      TmpInst.addOperand(Inst.getOperand(3));
6156      TmpInst.addOperand(Inst.getOperand(4));
6157      Inst = TmpInst;
6158      return true;
6159    }
6160    break;
6161  }
6162  case ARM::MOVsi: {
6163    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6164    if (SOpc == ARM_AM::rrx) return false;
6165    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6166      // Shifting by zero is accepted as a vanilla 'MOVr'
6167      MCInst TmpInst;
6168      TmpInst.setOpcode(ARM::MOVr);
6169      TmpInst.addOperand(Inst.getOperand(0));
6170      TmpInst.addOperand(Inst.getOperand(1));
6171      TmpInst.addOperand(Inst.getOperand(3));
6172      TmpInst.addOperand(Inst.getOperand(4));
6173      TmpInst.addOperand(Inst.getOperand(5));
6174      Inst = TmpInst;
6175      return true;
6176    }
6177    return false;
6178  }
6179  case ARM::ANDrsi:
6180  case ARM::ORRrsi:
6181  case ARM::EORrsi:
6182  case ARM::BICrsi:
6183  case ARM::SUBrsi:
6184  case ARM::ADDrsi: {
6185    unsigned newOpc;
6186    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
6187    if (SOpc == ARM_AM::rrx) return false;
6188    switch (Inst.getOpcode()) {
6189    default: assert(0 && "unexpected opcode!");
6190    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
6191    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
6192    case ARM::EORrsi: newOpc = ARM::EORrr; break;
6193    case ARM::BICrsi: newOpc = ARM::BICrr; break;
6194    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
6195    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
6196    }
6197    // If the shift is by zero, use the non-shifted instruction definition.
6198    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
6199      MCInst TmpInst;
6200      TmpInst.setOpcode(newOpc);
6201      TmpInst.addOperand(Inst.getOperand(0));
6202      TmpInst.addOperand(Inst.getOperand(1));
6203      TmpInst.addOperand(Inst.getOperand(2));
6204      TmpInst.addOperand(Inst.getOperand(4));
6205      TmpInst.addOperand(Inst.getOperand(5));
6206      TmpInst.addOperand(Inst.getOperand(6));
6207      Inst = TmpInst;
6208      return true;
6209    }
6210    return false;
6211  }
6212  case ARM::t2IT: {
6213    // The mask bits for all but the first condition are represented as
6214    // the low bit of the condition code value implies 't'. We currently
6215    // always have 1 implies 't', so XOR toggle the bits if the low bit
6216    // of the condition code is zero. The encoding also expects the low
6217    // bit of the condition to be encoded as bit 4 of the mask operand,
6218    // so mask that in if needed
6219    MCOperand &MO = Inst.getOperand(1);
6220    unsigned Mask = MO.getImm();
6221    unsigned OrigMask = Mask;
6222    unsigned TZ = CountTrailingZeros_32(Mask);
6223    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6224      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6225      for (unsigned i = 3; i != TZ; --i)
6226        Mask ^= 1 << i;
6227    } else
6228      Mask |= 0x10;
6229    MO.setImm(Mask);
6230
6231    // Set up the IT block state according to the IT instruction we just
6232    // matched.
6233    assert(!inITBlock() && "nested IT blocks?!");
6234    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6235    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6236    ITState.CurPosition = 0;
6237    ITState.FirstCond = true;
6238    break;
6239  }
6240  }
6241  return false;
6242}
6243
6244unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6245  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6246  // suffix depending on whether they're in an IT block or not.
6247  unsigned Opc = Inst.getOpcode();
6248  const MCInstrDesc &MCID = getInstDesc(Opc);
6249  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6250    assert(MCID.hasOptionalDef() &&
6251           "optionally flag setting instruction missing optional def operand");
6252    assert(MCID.NumOperands == Inst.getNumOperands() &&
6253           "operand count mismatch!");
6254    // Find the optional-def operand (cc_out).
6255    unsigned OpNo;
6256    for (OpNo = 0;
6257         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
6258         ++OpNo)
6259      ;
6260    // If we're parsing Thumb1, reject it completely.
6261    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6262      return Match_MnemonicFail;
6263    // If we're parsing Thumb2, which form is legal depends on whether we're
6264    // in an IT block.
6265    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6266        !inITBlock())
6267      return Match_RequiresITBlock;
6268    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6269        inITBlock())
6270      return Match_RequiresNotITBlock;
6271  }
6272  // Some high-register supporting Thumb1 encodings only allow both registers
6273  // to be from r0-r7 when in Thumb2.
6274  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6275           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6276           isARMLowRegister(Inst.getOperand(2).getReg()))
6277    return Match_RequiresThumb2;
6278  // Others only require ARMv6 or later.
6279  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6280           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6281           isARMLowRegister(Inst.getOperand(1).getReg()))
6282    return Match_RequiresV6;
6283  return Match_Success;
6284}
6285
6286bool ARMAsmParser::
6287MatchAndEmitInstruction(SMLoc IDLoc,
6288                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
6289                        MCStreamer &Out) {
6290  MCInst Inst;
6291  unsigned ErrorInfo;
6292  unsigned MatchResult;
6293  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
6294  switch (MatchResult) {
6295  default: break;
6296  case Match_Success:
6297    // Context sensitive operand constraints aren't handled by the matcher,
6298    // so check them here.
6299    if (validateInstruction(Inst, Operands)) {
6300      // Still progress the IT block, otherwise one wrong condition causes
6301      // nasty cascading errors.
6302      forwardITPosition();
6303      return true;
6304    }
6305
6306    // Some instructions need post-processing to, for example, tweak which
6307    // encoding is selected. Loop on it while changes happen so the
6308    // individual transformations can chain off each other. E.g.,
6309    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
6310    while (processInstruction(Inst, Operands))
6311      ;
6312
6313    // Only move forward at the very end so that everything in validate
6314    // and process gets a consistent answer about whether we're in an IT
6315    // block.
6316    forwardITPosition();
6317
6318    Out.EmitInstruction(Inst);
6319    return false;
6320  case Match_MissingFeature:
6321    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
6322    return true;
6323  case Match_InvalidOperand: {
6324    SMLoc ErrorLoc = IDLoc;
6325    if (ErrorInfo != ~0U) {
6326      if (ErrorInfo >= Operands.size())
6327        return Error(IDLoc, "too few operands for instruction");
6328
6329      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
6330      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6331    }
6332
6333    return Error(ErrorLoc, "invalid operand for instruction");
6334  }
6335  case Match_MnemonicFail:
6336    return Error(IDLoc, "invalid instruction");
6337  case Match_ConversionFail:
6338    // The converter function will have already emited a diagnostic.
6339    return true;
6340  case Match_RequiresNotITBlock:
6341    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6342  case Match_RequiresITBlock:
6343    return Error(IDLoc, "instruction only valid inside IT block");
6344  case Match_RequiresV6:
6345    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6346  case Match_RequiresThumb2:
6347    return Error(IDLoc, "instruction variant requires Thumb2");
6348  }
6349
6350  llvm_unreachable("Implement any new match types added!");
6351  return true;
6352}
6353
6354/// parseDirective parses the arm specific directives
6355bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6356  StringRef IDVal = DirectiveID.getIdentifier();
6357  if (IDVal == ".word")
6358    return parseDirectiveWord(4, DirectiveID.getLoc());
6359  else if (IDVal == ".thumb")
6360    return parseDirectiveThumb(DirectiveID.getLoc());
6361  else if (IDVal == ".arm")
6362    return parseDirectiveARM(DirectiveID.getLoc());
6363  else if (IDVal == ".thumb_func")
6364    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6365  else if (IDVal == ".code")
6366    return parseDirectiveCode(DirectiveID.getLoc());
6367  else if (IDVal == ".syntax")
6368    return parseDirectiveSyntax(DirectiveID.getLoc());
6369  else if (IDVal == ".unreq")
6370    return parseDirectiveUnreq(DirectiveID.getLoc());
6371  else if (IDVal == ".arch")
6372    return parseDirectiveArch(DirectiveID.getLoc());
6373  else if (IDVal == ".eabi_attribute")
6374    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6375  return true;
6376}
6377
6378/// parseDirectiveWord
6379///  ::= .word [ expression (, expression)* ]
6380bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6381  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6382    for (;;) {
6383      const MCExpr *Value;
6384      if (getParser().ParseExpression(Value))
6385        return true;
6386
6387      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6388
6389      if (getLexer().is(AsmToken::EndOfStatement))
6390        break;
6391
6392      // FIXME: Improve diagnostic.
6393      if (getLexer().isNot(AsmToken::Comma))
6394        return Error(L, "unexpected token in directive");
6395      Parser.Lex();
6396    }
6397  }
6398
6399  Parser.Lex();
6400  return false;
6401}
6402
6403/// parseDirectiveThumb
6404///  ::= .thumb
6405bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6406  if (getLexer().isNot(AsmToken::EndOfStatement))
6407    return Error(L, "unexpected token in directive");
6408  Parser.Lex();
6409
6410  if (!isThumb())
6411    SwitchMode();
6412  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6413  return false;
6414}
6415
6416/// parseDirectiveARM
6417///  ::= .arm
6418bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6419  if (getLexer().isNot(AsmToken::EndOfStatement))
6420    return Error(L, "unexpected token in directive");
6421  Parser.Lex();
6422
6423  if (isThumb())
6424    SwitchMode();
6425  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6426  return false;
6427}
6428
6429/// parseDirectiveThumbFunc
6430///  ::= .thumbfunc symbol_name
6431bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6432  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6433  bool isMachO = MAI.hasSubsectionsViaSymbols();
6434  StringRef Name;
6435  bool needFuncName = true;
6436
6437  // Darwin asm has (optionally) function name after .thumb_func direction
6438  // ELF doesn't
6439  if (isMachO) {
6440    const AsmToken &Tok = Parser.getTok();
6441    if (Tok.isNot(AsmToken::EndOfStatement)) {
6442      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6443        return Error(L, "unexpected token in .thumb_func directive");
6444      Name = Tok.getIdentifier();
6445      Parser.Lex(); // Consume the identifier token.
6446      needFuncName = false;
6447    }
6448  }
6449
6450  if (getLexer().isNot(AsmToken::EndOfStatement))
6451    return Error(L, "unexpected token in directive");
6452
6453  // Eat the end of statement and any blank lines that follow.
6454  while (getLexer().is(AsmToken::EndOfStatement))
6455    Parser.Lex();
6456
6457  // FIXME: assuming function name will be the line following .thumb_func
6458  // We really should be checking the next symbol definition even if there's
6459  // stuff in between.
6460  if (needFuncName) {
6461    Name = Parser.getTok().getIdentifier();
6462  }
6463
6464  // Mark symbol as a thumb symbol.
6465  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6466  getParser().getStreamer().EmitThumbFunc(Func);
6467  return false;
6468}
6469
6470/// parseDirectiveSyntax
6471///  ::= .syntax unified | divided
6472bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6473  const AsmToken &Tok = Parser.getTok();
6474  if (Tok.isNot(AsmToken::Identifier))
6475    return Error(L, "unexpected token in .syntax directive");
6476  StringRef Mode = Tok.getString();
6477  if (Mode == "unified" || Mode == "UNIFIED")
6478    Parser.Lex();
6479  else if (Mode == "divided" || Mode == "DIVIDED")
6480    return Error(L, "'.syntax divided' arm asssembly not supported");
6481  else
6482    return Error(L, "unrecognized syntax mode in .syntax directive");
6483
6484  if (getLexer().isNot(AsmToken::EndOfStatement))
6485    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6486  Parser.Lex();
6487
6488  // TODO tell the MC streamer the mode
6489  // getParser().getStreamer().Emit???();
6490  return false;
6491}
6492
6493/// parseDirectiveCode
6494///  ::= .code 16 | 32
6495bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6496  const AsmToken &Tok = Parser.getTok();
6497  if (Tok.isNot(AsmToken::Integer))
6498    return Error(L, "unexpected token in .code directive");
6499  int64_t Val = Parser.getTok().getIntVal();
6500  if (Val == 16)
6501    Parser.Lex();
6502  else if (Val == 32)
6503    Parser.Lex();
6504  else
6505    return Error(L, "invalid operand to .code directive");
6506
6507  if (getLexer().isNot(AsmToken::EndOfStatement))
6508    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6509  Parser.Lex();
6510
6511  if (Val == 16) {
6512    if (!isThumb())
6513      SwitchMode();
6514    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6515  } else {
6516    if (isThumb())
6517      SwitchMode();
6518    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6519  }
6520
6521  return false;
6522}
6523
6524/// parseDirectiveReq
6525///  ::= name .req registername
6526bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6527  Parser.Lex(); // Eat the '.req' token.
6528  unsigned Reg;
6529  SMLoc SRegLoc, ERegLoc;
6530  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6531    Parser.EatToEndOfStatement();
6532    return Error(SRegLoc, "register name expected");
6533  }
6534
6535  // Shouldn't be anything else.
6536  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6537    Parser.EatToEndOfStatement();
6538    return Error(Parser.getTok().getLoc(),
6539                 "unexpected input in .req directive.");
6540  }
6541
6542  Parser.Lex(); // Consume the EndOfStatement
6543
6544  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6545    return Error(SRegLoc, "redefinition of '" + Name +
6546                          "' does not match original.");
6547
6548  return false;
6549}
6550
6551/// parseDirectiveUneq
6552///  ::= .unreq registername
6553bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6554  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6555    Parser.EatToEndOfStatement();
6556    return Error(L, "unexpected input in .unreq directive.");
6557  }
6558  RegisterReqs.erase(Parser.getTok().getIdentifier());
6559  Parser.Lex(); // Eat the identifier.
6560  return false;
6561}
6562
/// parseDirectiveArch
///  ::= .arch token
/// Stub: .arch is not implemented. Always returns true, which signals
/// failure/unhandled to the caller (see ParseDirective).
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
  return true;
}
6568
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int
/// Stub: .eabi_attribute is not implemented. Always returns true, which
/// signals failure/unhandled to the caller (see ParseDirective).
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  return true;
}
6574
6575extern "C" void LLVMInitializeARMAsmLexer();
6576
/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
  // Register this asm parser for both the ARM and Thumb targets so either
  // triple picks it up, then initialize the companion lexer.
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}
6583
6584#define GET_REGISTER_MATCHER
6585#define GET_MATCHER_IMPLEMENTATION
6586#include "ARMGenAsmMatcher.inc"
6587