ARMAsmParser.cpp revision 21bcca81f4597f1c7d939e5d69067539ff804e6d
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
namespace {

// Forward declaration; the full definition appears later in this file.
class ARMOperand;

// Lane addressing parsed on a NEON vector list: no lane suffix, an
// "all lanes" suffix, or a single indexed lane.
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
/// ARMAsmParser - Target assembly parser for ARM/Thumb. Implements the
/// MCTargetAsmParser interface: parses instructions and target directives,
/// tracks IT-block state, and switches between ARM and Thumb modes.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // Map of register aliases created via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // State of the IT block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // True while instructions are still expected inside an active IT block.
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostic helpers forwarding to the generic parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Low-level register, operand, and directive parsing helpers. The
  // tryParse* variants return without error when the input doesn't match.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);

  // Split a full mnemonic (e.g. "addseq") into the bare mnemonic and its
  // predication/carry-set/IMod/IT-mask suffixes.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries, driven by the current feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb modes and recompute available features.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked by the auto-generated matcher above.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match validation and canonicalization of the resulting MCInst.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match results, numbered after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
258} // end anonymous namespace
259
namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union below: identifies which flavor of operand
  // this object currently holds, and therefore which union member is live.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  // Source range of the tokens this operand was parsed from.
  SMLoc StartLoc, EndLoc;
  // Register-list storage; only used by the k_*RegisterList kinds. Kept
  // outside the union because it has a non-trivial constructor.
  SmallVector<unsigned, 8> Registers;

  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    // Raw token text; points into the source buffer, not owned here.
    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
      unsigned LaneIndex;
      bool isDoubleSpaced;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;       // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;        // shift for OffsetReg.
      unsigned Alignment;       // 0 = no alignment specified
                                // n = alignment in bytes (2, 4, 8, 16, or 32)
      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };
400
  // Private: records only the kind; the creator fills in the matching
  // union member afterwards.
  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  // Copy constructor. Only the union member selected by Kind is copied —
  // reading any other member of the union would be invalid. Every KindTy
  // value has a case below; keep this switch in sync when adding kinds.
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      // Register lists live outside the union, in the Registers vector.
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }
479
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Typed accessors. Each asserts that the operand currently holds the
  // matching kind before reading the corresponding union member.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(isImm() && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }
540
541  bool isCoprocNum() const { return Kind == k_CoprocNum; }
542  bool isCoprocReg() const { return Kind == k_CoprocReg; }
543  bool isCoprocOption() const { return Kind == k_CoprocOption; }
544  bool isCondCode() const { return Kind == k_CondCode; }
545  bool isCCOut() const { return Kind == k_CCOut; }
546  bool isITMask() const { return Kind == k_ITCondMask; }
547  bool isITCondCode() const { return Kind == k_CondCode; }
548  bool isImm() const { return Kind == k_Immediate; }
549  bool isFPImm() const { return Kind == k_FPImmediate; }
550  bool isImm8s4() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
556  }
557  bool isImm0_1020s4() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
563  }
564  bool isImm0_508s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
570  }
571  bool isImm0_255() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return Value >= 0 && Value < 256;
577  }
578  bool isImm0_1() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return Value >= 0 && Value < 2;
584  }
585  bool isImm0_3() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 4;
591  }
592  bool isImm0_7() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 8;
598  }
599  bool isImm0_15() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 16;
605  }
606  bool isImm0_31() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 32;
612  }
613  bool isImm0_63() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 64;
619  }
620  bool isImm8() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value == 8;
626  }
627  bool isImm16() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value == 16;
633  }
634  bool isImm32() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 32;
640  }
641  bool isShrImm8() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value > 0 && Value <= 8;
647  }
648  bool isShrImm16() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value > 0 && Value <= 16;
654  }
655  bool isShrImm32() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 32;
661  }
662  bool isShrImm64() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 64;
668  }
669  bool isImm1_7() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value < 8;
675  }
676  bool isImm1_15() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value < 16;
682  }
683  bool isImm1_31() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 32;
689  }
690  bool isImm1_16() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 17;
696  }
697  bool isImm1_32() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 33;
703  }
704  bool isImm0_32() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value >= 0 && Value < 33;
710  }
711  bool isImm0_65535() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value >= 0 && Value < 65536;
717  }
718  bool isImm0_65535Expr() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    // If it's not a constant expression, it'll generate a fixup and be
722    // handled later.
723    if (!CE) return true;
724    int64_t Value = CE->getValue();
725    return Value >= 0 && Value < 65536;
726  }
727  bool isImm24bit() const {
728    if (!isImm()) return false;
729    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
730    if (!CE) return false;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value <= 0xffffff;
733  }
734  bool isImmThumbSR() const {
735    if (!isImm()) return false;
736    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
737    if (!CE) return false;
738    int64_t Value = CE->getValue();
739    return Value > 0 && Value < 33;
740  }
741  bool isPKHLSLImm() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value >= 0 && Value < 32;
747  }
748  bool isPKHASRImm() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value > 0 && Value <= 32;
754  }
755  bool isARMSOImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return ARM_AM::getSOImmVal(Value) != -1;
761  }
762  bool isARMSOImmNot() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return ARM_AM::getSOImmVal(~Value) != -1;
768  }
769  bool isARMSOImmNeg() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(-Value) != -1;
775  }
776  bool isT2SOImm() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getT2SOImmVal(Value) != -1;
782  }
783  bool isT2SOImmNot() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getT2SOImmVal(~Value) != -1;
789  }
790  bool isT2SOImmNeg() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(-Value) != -1;
796  }
797  bool isSetEndImm() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return Value == 1 || Value == 0;
803  }
  // Simple kind queries.
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  // Post-indexed register without any shift applied.
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  // Memory operand with no offset; an alignment qualifier is permitted.
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
830  bool isAddrMode2() const {
831    if (!isMemory() || Memory.Alignment != 0) return false;
832    // Check for register offset.
833    if (Memory.OffsetRegNum) return true;
834    // Immediate offset in range [-4095, 4095].
835    if (!Memory.OffsetImm) return true;
836    int64_t Val = Memory.OffsetImm->getValue();
837    return Val > -4096 && Val < 4096;
838  }
839  bool isAM2OffsetImm() const {
840    if (!isImm()) return false;
841    // Immediate offset in range [-4095, 4095].
842    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
843    if (!CE) return false;
844    int64_t Val = CE->getValue();
845    return Val > -4096 && Val < 4096;
846  }
  // Addressing mode 3: register or 8-bit immediate offset, no shift.
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  // Post-indexed AM3 offset: an unshifted register or an immediate in
  // range (the INT32_MIN sentinel stands in for "#-0").
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  // Addressing mode 5: immediate-only offset, multiple of 4 in
  // [-1020, 1020]; INT32_MIN again encodes "#-0".
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
890  bool isMemTBB() const {
891    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
892        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
893      return false;
894    return true;
895  }
896  bool isMemTBH() const {
897    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
898        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
899        Memory.Alignment != 0 )
900      return false;
901    return true;
902  }
903  bool isMemRegOffset() const {
904    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
905      return false;
906    return true;
907  }
908  bool isT2MemRegOffset() const {
909    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
910        Memory.Alignment != 0)
911      return false;
912    // Only lsl #{0, 1, 2, 3} allowed.
913    if (Memory.ShiftType == ARM_AM::no_shift)
914      return true;
915    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
916      return false;
917    return true;
918  }
919  bool isMemThumbRR() const {
920    // Thumb reg+reg addressing is simple. Just two registers, a base and
921    // an offset. No shifts, negations or any other complicating factors.
922    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
923        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
924      return false;
925    return isARMLowRegister(Memory.BaseRegNum) &&
926      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
927  }
  // Thumb base+immediate form: low base register, offset a multiple of 4
  // in [0, 124].
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  // Thumb base+immediate form: low base register, offset a multiple of 2
  // in [0, 62].
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  // Thumb base+immediate form: low base register, offset in [0, 31].
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  // Thumb SP-relative form: base must be SP, offset a multiple of 4 in
  // [0, 1020].
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  // Immediate-offset memory operand, offset a multiple of 4 in
  // [-1020, 1020]; a non-constant immediate is a label reference.
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  // Immediate-offset memory operand, offset a multiple of 4 in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  // Immediate-offset memory operand in [-255, 255]; INT32_MIN encodes #-0.
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  // Immediate-offset memory operand in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  // Immediate-offset memory operand in [-255, -1]; INT32_MIN encodes #-0.
  // Note a missing offset is rejected here (zero is not negative).
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  // Immediate-offset memory operand in [0, 4095].
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  // Immediate-offset memory operand in [-4095, 4095]; INT32_MIN encodes
  // #-0, and a non-constant immediate is a label reference.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  // Post-index constant immediate in [-255, 255]; INT32_MIN encodes #-0.
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  // Post-index constant immediate, a multiple of 4 in [-1020, 1020];
  // INT32_MIN encodes #-0.
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }
1046
  // Simple operand-kind predicates for MSR mask and processor I-flags.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1049
1050  // NEON operands.
  // Vector register list with consecutive (single-spaced) D registers.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  // Vector register list with every-other (double-spaced) D registers.
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  // The following check list length for single-spaced lists of 1-4 regs.
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // Two double-spaced D registers (i.e. a Q-register pair view).
  bool isVecListTwoQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }
1081
  // "All lanes" (e.g. vld1.32 {d0[]}) variants of the list predicates.
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListTwoQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }
1102
  // Lane-indexed (e.g. vld1.32 {d0[1]}) variants: each predicate checks
  // list length and that the lane index fits the element width
  // (byte <= 7, halfword <= 3, word <= 1).
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }
1148
  // Scalar lane index checks: the valid range shrinks as the element
  // width grows (8 bytes, 4 halfwords, 2 words per D register).
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }
1161
1162  bool isNEONi8splat() const {
1163    if (!isImm()) return false;
1164    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1165    // Must be a constant.
1166    if (!CE) return false;
1167    int64_t Value = CE->getValue();
1168    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1169    // value.
1170    return Value >= 0 && Value < 256;
1171  }
1172
  // i16 splat: the non-zero byte may be in either byte position.
  bool isNEONi16splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  // i32 splat: exactly one of the four byte positions may be non-zero.
  bool isNEONi32splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }
1195
  // i32 VMOV immediate: one non-zero byte, or the 00XXff / 0Xffff
  // "ones-extended" patterns that VMOV/VMVN additionally allow.
  bool isNEONi32vmov() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
  // Same pattern check, applied to the bitwise complement of the value.
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    // NOTE(review): the complement is taken on the full 64-bit value, so
    // immediates written as large positive 32-bit constants complement to
    // a negative int64 and fail every range test — confirm callers only
    // reach this with small/negative constants.
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
1226
1227  bool isNEONi64splat() const {
1228    if (!isImm()) return false;
1229    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1230    // Must be a constant.
1231    if (!CE) return false;
1232    uint64_t Value = CE->getValue();
1233    // i64 value with each byte being either 0 or 0xff.
1234    for (unsigned i = 0; i < 8; ++i)
1235      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1236    return true;
1237  }
1238
1239  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1240    // Add as immediates when possible.  Null MCExpr = 0.
1241    if (Expr == 0)
1242      Inst.addOperand(MCOperand::CreateImm(0));
1243    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1244      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1245    else
1246      Inst.addOperand(MCOperand::CreateExpr(Expr));
1247  }
1248
  // Condition code is two MCInst operands: the code itself plus CPSR as
  // the register read (0 when the condition is AL, i.e. unconditional).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  // Coprocessor number/register both emit as a plain immediate.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  // Optional CC-out (S-bit) register and plain register operands.
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1290
  // Register shifted by register: source reg, shift reg, then the shift
  // opcode/amount packed by getSORegOpc.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by immediate: source reg plus packed shift opcode.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  // Shifter immediate: bit 5 carries the ASR-vs-LSL flag, low bits the
  // shift amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
1315
1316  void addRegListOperands(MCInst &Inst, unsigned N) const {
1317    assert(N == 1 && "Invalid number of operands!");
1318    const SmallVectorImpl<unsigned> &RegList = getRegList();
1319    for (SmallVectorImpl<unsigned>::const_iterator
1320           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1321      Inst.addOperand(MCOperand::CreateReg(*I));
1322  }
1323
  // DPR/SPR register lists are emitted identically to core reg lists.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }
1337
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    // The double shift left-then-right isolates bits [lsb, lsb+width) before
    // the complement.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }
1348
  // Generic immediate: defer to addExpr so symbolic values stay symbolic.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }
1382
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }
1416
  // The four Not/Neg variants below all emit the operand transformed back
  // into the canonical so_imm/t2_so_imm form: bitwise complement for the
  // *Not forms, arithmetic negation for the *Neg forms.
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }
1448
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  // Bare [Rn] memory operand: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  // [Rn:align] memory operand: base register plus alignment immediate.
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }
1464
  // AM2 memory operand: base reg, offset reg (0 if none), and the packed
  // AM2 opcode immediate (add/sub, offset or shift info).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1484
  // AM2 immediate offset (no base): zero register plus packed AM2 opcode.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1498
  // AM3 memory operand: base reg, offset reg (0 if none), packed AM3
  // opcode. A plain immediate operand is a label reference emitted as an
  // expression for later fixup.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateReg(0));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1527
1528  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1529    assert(N == 2 && "Invalid number of operands!");
1530    if (Kind == k_PostIndexRegister) {
1531      int32_t Val =
1532        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1533      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1534      Inst.addOperand(MCOperand::CreateImm(Val));
1535      return;
1536    }
1537
1538    // Constant offset.
1539    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1540    int32_t Val = CE->getValue();
1541    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1542    // Special case for #-0
1543    if (Val == INT32_MIN) Val = 0;
1544    if (Val < 0) Val = -Val;
1545    Val = ARM_AM::getAM3Opc(AddSub, Val);
1546    Inst.addOperand(MCOperand::CreateReg(0));
1547    Inst.addOperand(MCOperand::CreateImm(Val));
1548  }
1549
  // AM5 memory operand: base reg plus packed AM5 opcode; the offset is
  // stored pre-scaled (divided by 4). A plain immediate is a label
  // reference emitted as an expression for later fixup.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1571
  // imm8s4 offset form: base reg + unscaled offset immediate; a plain
  // immediate is a label reference emitted as an expression.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1595
  // imm8 offset form: base reg + offset immediate; the Pos/Neg variants
  // share the same emission, differing only in operand matching.
  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
1610
  // imm12 offset forms (unsigned and signed matchers emit identically):
  // base reg + offset immediate, or expression + 0 for a label reference.
  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (isImm()) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1640
  // Emit a TBB-style [Rn, Rm] memory operand: base then index register.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1646
  // Emit a TBH-style [Rn, Rm, lsl #1] memory operand: base then index
  // register (the shift is implicit in the instruction form).
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1652
1653  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1654    assert(N == 3 && "Invalid number of operands!");
1655    unsigned Val =
1656      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1657                        Memory.ShiftImm, Memory.ShiftType);
1658    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1659    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1660    Inst.addOperand(MCOperand::CreateImm(Val));
1661  }
1662
  // Emit a Thumb2 reg+reg memory operand: base, offset register, then the
  // shift amount applied to the offset register.
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }
1669
  // Emit a Thumb [Rn, Rm] memory operand: base then offset register.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1675
1676  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1677    assert(N == 2 && "Invalid number of operands!");
1678    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1679    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1680    Inst.addOperand(MCOperand::CreateImm(Val));
1681  }
1682
1683  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1684    assert(N == 2 && "Invalid number of operands!");
1685    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1686    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1687    Inst.addOperand(MCOperand::CreateImm(Val));
1688  }
1689
1690  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1691    assert(N == 2 && "Invalid number of operands!");
1692    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1693    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1694    Inst.addOperand(MCOperand::CreateImm(Val));
1695  }
1696
1697  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1698    assert(N == 2 && "Invalid number of operands!");
1699    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1700    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1701    Inst.addOperand(MCOperand::CreateImm(Val));
1702  }
1703
  // Emit a post-indexed imm8 offset: magnitude in the low bits, add/sub
  // flag in bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN appears to be used as a sentinel for '#-0' (isAdd is
    // already false here); clamp the magnitude to zero — TODO confirm
    // against the parser's negative-zero handling.
    if (Imm == INT32_MIN) Imm = 0;
    // Pack: |magnitude| in bits [7:0], isAdd in bit 8.
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1714
  // Emit a post-indexed scaled imm8 offset: magnitude/4 in the low bits,
  // add/sub flag in bit 8.
  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    // INT32_MIN appears to be used as a sentinel for '#-0' (isAdd is
    // already false here); clamp the magnitude to zero — TODO confirm.
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }
1726
  // Emit a post-indexed register offset: the register, then a flag
  // immediate for add (1) vs. subtract (0).
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }
1732
1733  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1734    assert(N == 2 && "Invalid number of operands!");
1735    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1736    // The sign, shift type, and shift amount are encoded in a single operand
1737    // using the AM2 encoding helpers.
1738    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1739    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1740                                     PostIdxReg.ShiftTy);
1741    Inst.addOperand(MCOperand::CreateImm(Imm));
1742  }
1743
  // Emit the MSR mask as a plain immediate.
  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }
1748
  // Emit the CPS interrupt flags (a/i/f bits) as a plain immediate.
  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }
1753
  // Emit a vector register list as its (super-)register number.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }
1758
  // Emit an indexed vector list: the register followed by the lane index.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
  }
1764
  // Emit an 8-bit-element lane index as a plain immediate.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1769
  // Emit a 16-bit-element lane index as a plain immediate.
  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1774
  // Emit a 32-bit-element lane index as a plain immediate.
  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1779
1780  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1781    assert(N == 1 && "Invalid number of operands!");
1782    // The immediate encodes the type of constant as well as the value.
1783    // Mask in that this is an i8 splat.
1784    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1785    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1786  }
1787
1788  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1789    assert(N == 1 && "Invalid number of operands!");
1790    // The immediate encodes the type of constant as well as the value.
1791    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1792    unsigned Value = CE->getValue();
1793    if (Value >= 256)
1794      Value = (Value >> 8) | 0xa00;
1795    else
1796      Value |= 0x800;
1797    Inst.addOperand(MCOperand::CreateImm(Value));
1798  }
1799
1800  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1801    assert(N == 1 && "Invalid number of operands!");
1802    // The immediate encodes the type of constant as well as the value.
1803    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1804    unsigned Value = CE->getValue();
1805    if (Value >= 256 && Value <= 0xff00)
1806      Value = (Value >> 8) | 0x200;
1807    else if (Value > 0xffff && Value <= 0xff0000)
1808      Value = (Value >> 16) | 0x400;
1809    else if (Value > 0xffffff)
1810      Value = (Value >> 24) | 0x600;
1811    Inst.addOperand(MCOperand::CreateImm(Value));
1812  }
1813
1814  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1815    assert(N == 1 && "Invalid number of operands!");
1816    // The immediate encodes the type of constant as well as the value.
1817    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1818    unsigned Value = CE->getValue();
1819    if (Value >= 256 && Value <= 0xffff)
1820      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1821    else if (Value > 0xffff && Value <= 0xffffff)
1822      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1823    else if (Value > 0xffffff)
1824      Value = (Value >> 24) | 0x600;
1825    Inst.addOperand(MCOperand::CreateImm(Value));
1826  }
1827
1828  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1829    assert(N == 1 && "Invalid number of operands!");
1830    // The immediate encodes the type of constant as well as the value.
1831    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1832    unsigned Value = ~CE->getValue();
1833    if (Value >= 256 && Value <= 0xffff)
1834      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1835    else if (Value > 0xffff && Value <= 0xffffff)
1836      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1837    else if (Value > 0xffffff)
1838      Value = (Value >> 24) | 0x600;
1839    Inst.addOperand(MCOperand::CreateImm(Value));
1840  }
1841
1842  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1843    assert(N == 1 && "Invalid number of operands!");
1844    // The immediate encodes the type of constant as well as the value.
1845    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1846    uint64_t Value = CE->getValue();
1847    unsigned Imm = 0;
1848    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1849      Imm |= (Value & 1) << i;
1850    }
1851    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1852  }
1853
  // Dump a human-readable description of this operand for debugging.
  virtual void print(raw_ostream &OS) const;
1855
1856  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1857    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1858    Op->ITMask.Mask = Mask;
1859    Op->StartLoc = S;
1860    Op->EndLoc = S;
1861    return Op;
1862  }
1863
1864  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1865    ARMOperand *Op = new ARMOperand(k_CondCode);
1866    Op->CC.Val = CC;
1867    Op->StartLoc = S;
1868    Op->EndLoc = S;
1869    return Op;
1870  }
1871
1872  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1873    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1874    Op->Cop.Val = CopVal;
1875    Op->StartLoc = S;
1876    Op->EndLoc = S;
1877    return Op;
1878  }
1879
1880  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1881    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1882    Op->Cop.Val = CopVal;
1883    Op->StartLoc = S;
1884    Op->EndLoc = S;
1885    return Op;
1886  }
1887
1888  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1889    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1890    Op->Cop.Val = Val;
1891    Op->StartLoc = S;
1892    Op->EndLoc = E;
1893    return Op;
1894  }
1895
1896  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1897    ARMOperand *Op = new ARMOperand(k_CCOut);
1898    Op->Reg.RegNum = RegNum;
1899    Op->StartLoc = S;
1900    Op->EndLoc = S;
1901    return Op;
1902  }
1903
1904  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1905    ARMOperand *Op = new ARMOperand(k_Token);
1906    Op->Tok.Data = Str.data();
1907    Op->Tok.Length = Str.size();
1908    Op->StartLoc = S;
1909    Op->EndLoc = S;
1910    return Op;
1911  }
1912
1913  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1914    ARMOperand *Op = new ARMOperand(k_Register);
1915    Op->Reg.RegNum = RegNum;
1916    Op->StartLoc = S;
1917    Op->EndLoc = E;
1918    return Op;
1919  }
1920
1921  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1922                                           unsigned SrcReg,
1923                                           unsigned ShiftReg,
1924                                           unsigned ShiftImm,
1925                                           SMLoc S, SMLoc E) {
1926    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1927    Op->RegShiftedReg.ShiftTy = ShTy;
1928    Op->RegShiftedReg.SrcReg = SrcReg;
1929    Op->RegShiftedReg.ShiftReg = ShiftReg;
1930    Op->RegShiftedReg.ShiftImm = ShiftImm;
1931    Op->StartLoc = S;
1932    Op->EndLoc = E;
1933    return Op;
1934  }
1935
1936  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1937                                            unsigned SrcReg,
1938                                            unsigned ShiftImm,
1939                                            SMLoc S, SMLoc E) {
1940    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1941    Op->RegShiftedImm.ShiftTy = ShTy;
1942    Op->RegShiftedImm.SrcReg = SrcReg;
1943    Op->RegShiftedImm.ShiftImm = ShiftImm;
1944    Op->StartLoc = S;
1945    Op->EndLoc = E;
1946    return Op;
1947  }
1948
1949  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1950                                   SMLoc S, SMLoc E) {
1951    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1952    Op->ShifterImm.isASR = isASR;
1953    Op->ShifterImm.Imm = Imm;
1954    Op->StartLoc = S;
1955    Op->EndLoc = E;
1956    return Op;
1957  }
1958
1959  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1960    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1961    Op->RotImm.Imm = Imm;
1962    Op->StartLoc = S;
1963    Op->EndLoc = E;
1964    return Op;
1965  }
1966
1967  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1968                                    SMLoc S, SMLoc E) {
1969    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1970    Op->Bitfield.LSB = LSB;
1971    Op->Bitfield.Width = Width;
1972    Op->StartLoc = S;
1973    Op->EndLoc = E;
1974    return Op;
1975  }
1976
1977  static ARMOperand *
1978  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1979                SMLoc StartLoc, SMLoc EndLoc) {
1980    KindTy Kind = k_RegisterList;
1981
1982    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1983      Kind = k_DPRRegisterList;
1984    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
1985             contains(Regs.front().first))
1986      Kind = k_SPRRegisterList;
1987
1988    ARMOperand *Op = new ARMOperand(Kind);
1989    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
1990           I = Regs.begin(), E = Regs.end(); I != E; ++I)
1991      Op->Registers.push_back(I->first);
1992    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
1993    Op->StartLoc = StartLoc;
1994    Op->EndLoc = EndLoc;
1995    return Op;
1996  }
1997
1998  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
1999                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2000    ARMOperand *Op = new ARMOperand(k_VectorList);
2001    Op->VectorList.RegNum = RegNum;
2002    Op->VectorList.Count = Count;
2003    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2004    Op->StartLoc = S;
2005    Op->EndLoc = E;
2006    return Op;
2007  }
2008
2009  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2010                                              bool isDoubleSpaced,
2011                                              SMLoc S, SMLoc E) {
2012    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2013    Op->VectorList.RegNum = RegNum;
2014    Op->VectorList.Count = Count;
2015    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2016    Op->StartLoc = S;
2017    Op->EndLoc = E;
2018    return Op;
2019  }
2020
2021  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2022                                             unsigned Index,
2023                                             bool isDoubleSpaced,
2024                                             SMLoc S, SMLoc E) {
2025    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2026    Op->VectorList.RegNum = RegNum;
2027    Op->VectorList.Count = Count;
2028    Op->VectorList.LaneIndex = Index;
2029    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2030    Op->StartLoc = S;
2031    Op->EndLoc = E;
2032    return Op;
2033  }
2034
2035  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2036                                       MCContext &Ctx) {
2037    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2038    Op->VectorIndex.Val = Idx;
2039    Op->StartLoc = S;
2040    Op->EndLoc = E;
2041    return Op;
2042  }
2043
2044  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2045    ARMOperand *Op = new ARMOperand(k_Immediate);
2046    Op->Imm.Val = Val;
2047    Op->StartLoc = S;
2048    Op->EndLoc = E;
2049    return Op;
2050  }
2051
2052  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2053    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2054    Op->FPImm.Val = Val;
2055    Op->StartLoc = S;
2056    Op->EndLoc = S;
2057    return Op;
2058  }
2059
2060  static ARMOperand *CreateMem(unsigned BaseRegNum,
2061                               const MCConstantExpr *OffsetImm,
2062                               unsigned OffsetRegNum,
2063                               ARM_AM::ShiftOpc ShiftType,
2064                               unsigned ShiftImm,
2065                               unsigned Alignment,
2066                               bool isNegative,
2067                               SMLoc S, SMLoc E) {
2068    ARMOperand *Op = new ARMOperand(k_Memory);
2069    Op->Memory.BaseRegNum = BaseRegNum;
2070    Op->Memory.OffsetImm = OffsetImm;
2071    Op->Memory.OffsetRegNum = OffsetRegNum;
2072    Op->Memory.ShiftType = ShiftType;
2073    Op->Memory.ShiftImm = ShiftImm;
2074    Op->Memory.Alignment = Alignment;
2075    Op->Memory.isNegative = isNegative;
2076    Op->StartLoc = S;
2077    Op->EndLoc = E;
2078    return Op;
2079  }
2080
2081  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2082                                      ARM_AM::ShiftOpc ShiftTy,
2083                                      unsigned ShiftImm,
2084                                      SMLoc S, SMLoc E) {
2085    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2086    Op->PostIdxReg.RegNum = RegNum;
2087    Op->PostIdxReg.isAdd = isAdd;
2088    Op->PostIdxReg.ShiftTy = ShiftTy;
2089    Op->PostIdxReg.ShiftImm = ShiftImm;
2090    Op->StartLoc = S;
2091    Op->EndLoc = E;
2092    return Op;
2093  }
2094
2095  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2096    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2097    Op->MBOpt.Val = Opt;
2098    Op->StartLoc = S;
2099    Op->EndLoc = S;
2100    return Op;
2101  }
2102
2103  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2104    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2105    Op->IFlags.Val = IFlags;
2106    Op->StartLoc = S;
2107    Op->EndLoc = S;
2108    return Op;
2109  }
2110
2111  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2112    ARMOperand *Op = new ARMOperand(k_MSRMask);
2113    Op->MMask.Val = MMask;
2114    Op->StartLoc = S;
2115    Op->EndLoc = S;
2116    return Op;
2117  }
2118};
2119
2120} // end anonymous namespace.
2121
// Dump a human-readable description of the operand for debugging. The exact
// format is not part of any stable interface.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // NOTE(review): MaskStr has 15 entries but the assert below allows
    // Mask values 0..15; Mask == 15 would index out of bounds. Confirm
    // that valid IT masks never reach 15.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offset, shift, and alignment
    // are omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): output lacks an opening '<' to match the trailing '>'.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    // Print each set a/i/f flag, highest bit first.
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    // Comma-separate the registers; the '<' comparison is valid because
    // SmallVector iterators are pointers.
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}
2244
2245/// @name Auto-generated Match Functions
2246/// {
2247
2248static unsigned MatchRegisterName(StringRef Name);
2249
2250/// }
2251
2252bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2253                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2254  StartLoc = Parser.getTok().getLoc();
2255  RegNo = tryParseRegister();
2256  EndLoc = Parser.getTok().getLoc();
2257
2258  return (RegNo == (unsigned)-1);
2259}
2260
/// Try to parse a register name.  The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned.  Otherwise return -1.
///
/// Resolution order: tablegen'erated names, then hard-coded gas-compatible
/// aliases, then user aliases registered via the .req directive.
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // Register names are case insensitive; match on the lower-cased form.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      // Additional register name aliases for 'gas' compatibility.
      .Case("a1", ARM::R0)
      .Case("a2", ARM::R1)
      .Case("a3", ARM::R2)
      .Case("a4", ARM::R3)
      .Case("v1", ARM::R4)
      .Case("v2", ARM::R5)
      .Case("v3", ARM::R6)
      .Case("v4", ARM::R7)
      .Case("v5", ARM::R8)
      .Case("v6", ARM::R9)
      .Case("v7", ARM::R10)
      .Case("v8", ARM::R11)
      .Case("sb", ARM::R9)
      .Case("sl", ARM::R10)
      .Case("fp", ARM::R11)
      .Default(0);
  }
  if (!RegNum) {
    // Check for aliases registered via .req. Canonicalize to lower case.
    // That's more consistent since register names are case insensitive, and
    // it's how the original entry was passed in from MC/MCParser/AsmParser.
    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
    // If no match, return failure.
    if (Entry == RegisterReqs.end())
      return -1;
    Parser.Lex(); // Eat identifier token.
    return Entry->getValue();
  }

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}
2311
// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Shift mnemonics are case insensitive; "asl" is accepted as an alias
  // for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic at all: recoverable, nothing consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this returns 1 ("recoverable")
    // even though the shift token was consumed and the previous operand
    // popped — confirm callers treat this correctly.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat '#' or '$'.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
                    "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Emit either a register-shifted-register or register-shifted-immediate
  // operand; RRX is always the immediate form (with ShiftReg == SrcReg
  // handled by the encoder).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}
2407
2408
2409/// Try to parse a register name.  The token must be an Identifier when called.
2410/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2411/// if there is a "writeback". 'true' if it's not a register.
2412///
2413/// TODO this is likely to change to allow different register types and or to
2414/// parse for a specific register type.
2415bool ARMAsmParser::
2416tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2417  SMLoc S = Parser.getTok().getLoc();
2418  int RegNo = tryParseRegister();
2419  if (RegNo == -1)
2420    return true;
2421
2422  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2423
2424  const AsmToken &ExclaimTok = Parser.getTok();
2425  if (ExclaimTok.is(AsmToken::Exclaim)) {
2426    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2427                                               ExclaimTok.getLoc()));
2428    Parser.Lex(); // Eat exclaim token
2429    return false;
2430  }
2431
2432  // Also check for an index operand. This is only legal for vector registers,
2433  // but that'll get caught OK in operand matching, so we don't need to
2434  // explicitly filter everything else out here.
2435  if (Parser.getTok().is(AsmToken::LBrac)) {
2436    SMLoc SIdx = Parser.getTok().getLoc();
2437    Parser.Lex(); // Eat left bracket token.
2438
2439    const MCExpr *ImmVal;
2440    if (getParser().ParseExpression(ImmVal))
2441      return MatchOperand_ParseFail;
2442    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2443    if (!MCE) {
2444      TokError("immediate value expected for vector index");
2445      return MatchOperand_ParseFail;
2446    }
2447
2448    SMLoc E = Parser.getTok().getLoc();
2449    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2450      Error(E, "']' expected");
2451      return MatchOperand_ParseFail;
2452    }
2453
2454    Parser.Lex(); // Eat right bracket token.
2455
2456    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2457                                                     SIdx, E,
2458                                                     getContext()));
2459  }
2460
2461  return false;
2462}
2463
2464/// MatchCoprocessorOperandName - Try to parse an coprocessor related
2465/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2466/// "c5", ...
2467static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2468  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2469  // but efficient.
2470  switch (Name.size()) {
2471  default: break;
2472  case 2:
2473    if (Name[0] != CoprocOp)
2474      return -1;
2475    switch (Name[1]) {
2476    default:  return -1;
2477    case '0': return 0;
2478    case '1': return 1;
2479    case '2': return 2;
2480    case '3': return 3;
2481    case '4': return 4;
2482    case '5': return 5;
2483    case '6': return 6;
2484    case '7': return 7;
2485    case '8': return 8;
2486    case '9': return 9;
2487    }
2488    break;
2489  case 3:
2490    if (Name[0] != CoprocOp || Name[1] != '1')
2491      return -1;
2492    switch (Name[2]) {
2493    default:  return -1;
2494    case '0': return 10;
2495    case '1': return 11;
2496    case '2': return 12;
2497    case '3': return 13;
2498    case '4': return 14;
2499    case '5': return 15;
2500    }
2501    break;
2502  }
2503
2504  return -1;
2505}
2506
2507/// parseITCondCode - Try to parse a condition code for an IT instruction.
2508ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2509parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2510  SMLoc S = Parser.getTok().getLoc();
2511  const AsmToken &Tok = Parser.getTok();
2512  if (!Tok.is(AsmToken::Identifier))
2513    return MatchOperand_NoMatch;
2514  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2515    .Case("eq", ARMCC::EQ)
2516    .Case("ne", ARMCC::NE)
2517    .Case("hs", ARMCC::HS)
2518    .Case("cs", ARMCC::HS)
2519    .Case("lo", ARMCC::LO)
2520    .Case("cc", ARMCC::LO)
2521    .Case("mi", ARMCC::MI)
2522    .Case("pl", ARMCC::PL)
2523    .Case("vs", ARMCC::VS)
2524    .Case("vc", ARMCC::VC)
2525    .Case("hi", ARMCC::HI)
2526    .Case("ls", ARMCC::LS)
2527    .Case("ge", ARMCC::GE)
2528    .Case("lt", ARMCC::LT)
2529    .Case("gt", ARMCC::GT)
2530    .Case("le", ARMCC::LE)
2531    .Case("al", ARMCC::AL)
2532    .Default(~0U);
2533  if (CC == ~0U)
2534    return MatchOperand_NoMatch;
2535  Parser.Lex(); // Eat the token.
2536
2537  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2538
2539  return MatchOperand_Success;
2540}
2541
2542/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
2543/// token must be an Identifier when called, and if it is a coprocessor
2544/// number, the token is eaten and the operand is added to the operand list.
2545ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2546parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2547  SMLoc S = Parser.getTok().getLoc();
2548  const AsmToken &Tok = Parser.getTok();
2549  if (Tok.isNot(AsmToken::Identifier))
2550    return MatchOperand_NoMatch;
2551
2552  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2553  if (Num == -1)
2554    return MatchOperand_NoMatch;
2555
2556  Parser.Lex(); // Eat identifier token.
2557  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2558  return MatchOperand_Success;
2559}
2560
2561/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
2562/// token must be an Identifier when called, and if it is a coprocessor
2563/// number, the token is eaten and the operand is added to the operand list.
2564ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2565parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2566  SMLoc S = Parser.getTok().getLoc();
2567  const AsmToken &Tok = Parser.getTok();
2568  if (Tok.isNot(AsmToken::Identifier))
2569    return MatchOperand_NoMatch;
2570
2571  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2572  if (Reg == -1)
2573    return MatchOperand_NoMatch;
2574
2575  Parser.Lex(); // Eat identifier token.
2576  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2577  return MatchOperand_Success;
2578}
2579
2580/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2581/// coproc_option : '{' imm0_255 '}'
2582ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2583parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2584  SMLoc S = Parser.getTok().getLoc();
2585
2586  // If this isn't a '{', this isn't a coprocessor immediate operand.
2587  if (Parser.getTok().isNot(AsmToken::LCurly))
2588    return MatchOperand_NoMatch;
2589  Parser.Lex(); // Eat the '{'
2590
2591  const MCExpr *Expr;
2592  SMLoc Loc = Parser.getTok().getLoc();
2593  if (getParser().ParseExpression(Expr)) {
2594    Error(Loc, "illegal expression");
2595    return MatchOperand_ParseFail;
2596  }
2597  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2598  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2599    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2600    return MatchOperand_ParseFail;
2601  }
2602  int Val = CE->getValue();
2603
2604  // Check for and consume the closing '}'
2605  if (Parser.getTok().isNot(AsmToken::RCurly))
2606    return MatchOperand_ParseFail;
2607  SMLoc E = Parser.getTok().getLoc();
2608  Parser.Lex(); // Eat the '}'
2609
2610  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2611  return MatchOperand_Success;
2612}
2613
2614// For register list parsing, we need to map from raw GPR register numbering
2615// to the enumeration values. The enumeration values aren't sorted by
2616// register number due to our using "sp", "lr" and "pc" as canonical names.
2617static unsigned getNextRegister(unsigned Reg) {
2618  // If this is a GPR, we need to do it manually, otherwise we can rely
2619  // on the sort ordering of the enumeration since the other reg-classes
2620  // are sane.
2621  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2622    return Reg + 1;
2623  switch(Reg) {
2624  default: assert(0 && "Invalid GPR number!");
2625  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2626  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2627  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2628  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2629  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2630  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2631  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2632  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2633  }
2634}
2635
2636// Return the low-subreg of a given Q register.
2637static unsigned getDRegFromQReg(unsigned QReg) {
2638  switch (QReg) {
2639  default: llvm_unreachable("expected a Q register!");
2640  case ARM::Q0:  return ARM::D0;
2641  case ARM::Q1:  return ARM::D2;
2642  case ARM::Q2:  return ARM::D4;
2643  case ARM::Q3:  return ARM::D6;
2644  case ARM::Q4:  return ARM::D8;
2645  case ARM::Q5:  return ARM::D10;
2646  case ARM::Q6:  return ARM::D12;
2647  case ARM::Q7:  return ARM::D14;
2648  case ARM::Q8:  return ARM::D16;
2649  case ARM::Q9:  return ARM::D18;
2650  case ARM::Q10: return ARM::D20;
2651  case ARM::Q11: return ARM::D22;
2652  case ARM::Q12: return ARM::D24;
2653  case ARM::Q13: return ARM::D26;
2654  case ARM::Q14: return ARM::D28;
2655  case ARM::Q15: return ARM::D30;
2656  }
2657}
2658
/// Parse a register list.
/// Syntax: '{' reg (',' reg | '-' reg)* '}' [ '^' ]
/// Q registers are accepted and expanded to their two D sub-registers.
/// Returns true on error, with a diagnostic already emitted.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg; // The second (odd) D register is pushed by the common code below.
  }
  // The first register fixes the register class for the whole list; every
  // subsequent register must belong to the same class.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax: "-<reg>" extends from the last register parsed.
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      // getNextRegister() follows architectural numbering for GPRs (SP, LR,
      // PC come after R12), so the walk matches the encoding order.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    // Remember the token so a duplicate-register warning can quote the
    // register exactly as the user spelled it.
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // A duplicate is only a warning (gas compatibility); the repeated
    // register is simply not added again.
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
2780
// Helper function to parse the lane index for vector lists.
// On success, sets LaneKind to NoLanes (no '[' present), AllLanes ("[]"),
// or IndexedLane ("[n]"), and Index to the lane number for IndexedLane
// (0 otherwise). Consumes the bracketed suffix when present.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }
    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().ParseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    // The lane index must fold to a compile-time constant.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  // No '[' at all: a plain register with no lane specifier.
  LaneKind = NoLanes;
  return MatchOperand_Success;
}
2823
2824// parse a vector register list
2825ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2826parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2827  VectorLaneTy LaneKind;
2828  unsigned LaneIndex;
2829  SMLoc S = Parser.getTok().getLoc();
2830  // As an extension (to match gas), support a plain D register or Q register
2831  // (without encosing curly braces) as a single or double entry list,
2832  // respectively.
2833  if (Parser.getTok().is(AsmToken::Identifier)) {
2834    int Reg = tryParseRegister();
2835    if (Reg == -1)
2836      return MatchOperand_NoMatch;
2837    SMLoc E = Parser.getTok().getLoc();
2838    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2839      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2840      if (Res != MatchOperand_Success)
2841        return Res;
2842      switch (LaneKind) {
2843      default:
2844        assert(0 && "unexpected lane kind!");
2845      case NoLanes:
2846        E = Parser.getTok().getLoc();
2847        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2848        break;
2849      case AllLanes:
2850        E = Parser.getTok().getLoc();
2851        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2852                                                                S, E));
2853        break;
2854      case IndexedLane:
2855        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2856                                                               LaneIndex,
2857                                                               false, S, E));
2858        break;
2859      }
2860      return MatchOperand_Success;
2861    }
2862    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2863      Reg = getDRegFromQReg(Reg);
2864      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2865      if (Res != MatchOperand_Success)
2866        return Res;
2867      switch (LaneKind) {
2868      default:
2869        assert(0 && "unexpected lane kind!");
2870      case NoLanes:
2871        E = Parser.getTok().getLoc();
2872        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2873        break;
2874      case AllLanes:
2875        E = Parser.getTok().getLoc();
2876        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2877                                                                S, E));
2878        break;
2879      case IndexedLane:
2880        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2881                                                               LaneIndex,
2882                                                               false, S, E));
2883        break;
2884      }
2885      return MatchOperand_Success;
2886    }
2887    Error(S, "vector register expected");
2888    return MatchOperand_ParseFail;
2889  }
2890
2891  if (Parser.getTok().isNot(AsmToken::LCurly))
2892    return MatchOperand_NoMatch;
2893
2894  Parser.Lex(); // Eat '{' token.
2895  SMLoc RegLoc = Parser.getTok().getLoc();
2896
2897  int Reg = tryParseRegister();
2898  if (Reg == -1) {
2899    Error(RegLoc, "register expected");
2900    return MatchOperand_ParseFail;
2901  }
2902  unsigned Count = 1;
2903  int Spacing = 0;
2904  unsigned FirstReg = Reg;
2905  // The list is of D registers, but we also allow Q regs and just interpret
2906  // them as the two D sub-registers.
2907  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2908    FirstReg = Reg = getDRegFromQReg(Reg);
2909    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2910                 // it's ambiguous with four-register single spaced.
2911    ++Reg;
2912    ++Count;
2913  }
2914  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2915    return MatchOperand_ParseFail;
2916
2917  while (Parser.getTok().is(AsmToken::Comma) ||
2918         Parser.getTok().is(AsmToken::Minus)) {
2919    if (Parser.getTok().is(AsmToken::Minus)) {
2920      if (!Spacing)
2921        Spacing = 1; // Register range implies a single spaced list.
2922      else if (Spacing == 2) {
2923        Error(Parser.getTok().getLoc(),
2924              "sequential registers in double spaced list");
2925        return MatchOperand_ParseFail;
2926      }
2927      Parser.Lex(); // Eat the minus.
2928      SMLoc EndLoc = Parser.getTok().getLoc();
2929      int EndReg = tryParseRegister();
2930      if (EndReg == -1) {
2931        Error(EndLoc, "register expected");
2932        return MatchOperand_ParseFail;
2933      }
2934      // Allow Q regs and just interpret them as the two D sub-registers.
2935      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2936        EndReg = getDRegFromQReg(EndReg) + 1;
2937      // If the register is the same as the start reg, there's nothing
2938      // more to do.
2939      if (Reg == EndReg)
2940        continue;
2941      // The register must be in the same register class as the first.
2942      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2943        Error(EndLoc, "invalid register in register list");
2944        return MatchOperand_ParseFail;
2945      }
2946      // Ranges must go from low to high.
2947      if (Reg > EndReg) {
2948        Error(EndLoc, "bad range in register list");
2949        return MatchOperand_ParseFail;
2950      }
2951      // Parse the lane specifier if present.
2952      VectorLaneTy NextLaneKind;
2953      unsigned NextLaneIndex;
2954      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2955        return MatchOperand_ParseFail;
2956      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2957        Error(EndLoc, "mismatched lane index in register list");
2958        return MatchOperand_ParseFail;
2959      }
2960      EndLoc = Parser.getTok().getLoc();
2961
2962      // Add all the registers in the range to the register list.
2963      Count += EndReg - Reg;
2964      Reg = EndReg;
2965      continue;
2966    }
2967    Parser.Lex(); // Eat the comma.
2968    RegLoc = Parser.getTok().getLoc();
2969    int OldReg = Reg;
2970    Reg = tryParseRegister();
2971    if (Reg == -1) {
2972      Error(RegLoc, "register expected");
2973      return MatchOperand_ParseFail;
2974    }
2975    // vector register lists must be contiguous.
2976    // It's OK to use the enumeration values directly here rather, as the
2977    // VFP register classes have the enum sorted properly.
2978    //
2979    // The list is of D registers, but we also allow Q regs and just interpret
2980    // them as the two D sub-registers.
2981    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2982      if (!Spacing)
2983        Spacing = 1; // Register range implies a single spaced list.
2984      else if (Spacing == 2) {
2985        Error(RegLoc,
2986              "invalid register in double-spaced list (must be 'D' register')");
2987        return MatchOperand_ParseFail;
2988      }
2989      Reg = getDRegFromQReg(Reg);
2990      if (Reg != OldReg + 1) {
2991        Error(RegLoc, "non-contiguous register range");
2992        return MatchOperand_ParseFail;
2993      }
2994      ++Reg;
2995      Count += 2;
2996      // Parse the lane specifier if present.
2997      VectorLaneTy NextLaneKind;
2998      unsigned NextLaneIndex;
2999      SMLoc EndLoc = Parser.getTok().getLoc();
3000      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3001        return MatchOperand_ParseFail;
3002      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3003        Error(EndLoc, "mismatched lane index in register list");
3004        return MatchOperand_ParseFail;
3005      }
3006      continue;
3007    }
3008    // Normal D register.
3009    // Figure out the register spacing (single or double) of the list if
3010    // we don't know it already.
3011    if (!Spacing)
3012      Spacing = 1 + (Reg == OldReg + 2);
3013
3014    // Just check that it's contiguous and keep going.
3015    if (Reg != OldReg + Spacing) {
3016      Error(RegLoc, "non-contiguous register range");
3017      return MatchOperand_ParseFail;
3018    }
3019    ++Count;
3020    // Parse the lane specifier if present.
3021    VectorLaneTy NextLaneKind;
3022    unsigned NextLaneIndex;
3023    SMLoc EndLoc = Parser.getTok().getLoc();
3024    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3025      return MatchOperand_ParseFail;
3026    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3027      Error(EndLoc, "mismatched lane index in register list");
3028      return MatchOperand_ParseFail;
3029    }
3030  }
3031
3032  SMLoc E = Parser.getTok().getLoc();
3033  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3034    Error(E, "'}' expected");
3035    return MatchOperand_ParseFail;
3036  }
3037  Parser.Lex(); // Eat '}' token.
3038
3039  switch (LaneKind) {
3040  default:
3041    assert(0 && "unexpected lane kind in register list.");
3042  case NoLanes:
3043    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3044                                                    (Spacing == 2), S, E));
3045    break;
3046  case AllLanes:
3047    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3048                                                            (Spacing == 2),
3049                                                            S, E));
3050    break;
3051  case IndexedLane:
3052    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3053                                                           LaneIndex,
3054                                                           (Spacing == 2),
3055                                                           S, E));
3056    break;
3057  }
3058  return MatchOperand_Success;
3059}
3060
3061/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3062ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3063parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3064  SMLoc S = Parser.getTok().getLoc();
3065  const AsmToken &Tok = Parser.getTok();
3066  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3067  StringRef OptStr = Tok.getString();
3068
3069  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3070    .Case("sy",    ARM_MB::SY)
3071    .Case("st",    ARM_MB::ST)
3072    .Case("sh",    ARM_MB::ISH)
3073    .Case("ish",   ARM_MB::ISH)
3074    .Case("shst",  ARM_MB::ISHST)
3075    .Case("ishst", ARM_MB::ISHST)
3076    .Case("nsh",   ARM_MB::NSH)
3077    .Case("un",    ARM_MB::NSH)
3078    .Case("nshst", ARM_MB::NSHST)
3079    .Case("unst",  ARM_MB::NSHST)
3080    .Case("osh",   ARM_MB::OSH)
3081    .Case("oshst", ARM_MB::OSHST)
3082    .Default(~0U);
3083
3084  if (Opt == ~0U)
3085    return MatchOperand_NoMatch;
3086
3087  Parser.Lex(); // Eat identifier token.
3088  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3089  return MatchOperand_Success;
3090}
3091
3092/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3093ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3094parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3095  SMLoc S = Parser.getTok().getLoc();
3096  const AsmToken &Tok = Parser.getTok();
3097  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3098  StringRef IFlagsStr = Tok.getString();
3099
3100  // An iflags string of "none" is interpreted to mean that none of the AIF
3101  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3102  unsigned IFlags = 0;
3103  if (IFlagsStr != "none") {
3104        for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3105      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3106        .Case("a", ARM_PROC::A)
3107        .Case("i", ARM_PROC::I)
3108        .Case("f", ARM_PROC::F)
3109        .Default(~0U);
3110
3111      // If some specific iflag is already set, it means that some letter is
3112      // present more than once, this is not acceptable.
3113      if (Flag == ~0U || (IFlags & Flag))
3114        return MatchOperand_NoMatch;
3115
3116      IFlags |= Flag;
3117    }
3118  }
3119
3120  Parser.Lex(); // Eat identifier token.
3121  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3122  return MatchOperand_Success;
3123}
3124
3125/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3126ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3127parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3128  SMLoc S = Parser.getTok().getLoc();
3129  const AsmToken &Tok = Parser.getTok();
3130  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3131  StringRef Mask = Tok.getString();
3132
3133  if (isMClass()) {
3134    // See ARMv6-M 10.1.1
3135    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3136      .Case("apsr", 0)
3137      .Case("iapsr", 1)
3138      .Case("eapsr", 2)
3139      .Case("xpsr", 3)
3140      .Case("ipsr", 5)
3141      .Case("epsr", 6)
3142      .Case("iepsr", 7)
3143      .Case("msp", 8)
3144      .Case("psp", 9)
3145      .Case("primask", 16)
3146      .Case("basepri", 17)
3147      .Case("basepri_max", 18)
3148      .Case("faultmask", 19)
3149      .Case("control", 20)
3150      .Default(~0U);
3151
3152    if (FlagsVal == ~0U)
3153      return MatchOperand_NoMatch;
3154
3155    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3156      // basepri, basepri_max and faultmask only valid for V7m.
3157      return MatchOperand_NoMatch;
3158
3159    Parser.Lex(); // Eat identifier token.
3160    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3161    return MatchOperand_Success;
3162  }
3163
3164  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3165  size_t Start = 0, Next = Mask.find('_');
3166  StringRef Flags = "";
3167  std::string SpecReg = Mask.slice(Start, Next).lower();
3168  if (Next != StringRef::npos)
3169    Flags = Mask.slice(Next+1, Mask.size());
3170
3171  // FlagsVal contains the complete mask:
3172  // 3-0: Mask
3173  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3174  unsigned FlagsVal = 0;
3175
3176  if (SpecReg == "apsr") {
3177    FlagsVal = StringSwitch<unsigned>(Flags)
3178    .Case("nzcvq",  0x8) // same as CPSR_f
3179    .Case("g",      0x4) // same as CPSR_s
3180    .Case("nzcvqg", 0xc) // same as CPSR_fs
3181    .Default(~0U);
3182
3183    if (FlagsVal == ~0U) {
3184      if (!Flags.empty())
3185        return MatchOperand_NoMatch;
3186      else
3187        FlagsVal = 8; // No flag
3188    }
3189  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3190    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3191      Flags = "fc";
3192    for (int i = 0, e = Flags.size(); i != e; ++i) {
3193      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3194      .Case("c", 1)
3195      .Case("x", 2)
3196      .Case("s", 4)
3197      .Case("f", 8)
3198      .Default(~0U);
3199
3200      // If some specific flag is already set, it means that some letter is
3201      // present more than once, this is not acceptable.
3202      if (FlagsVal == ~0U || (FlagsVal & Flag))
3203        return MatchOperand_NoMatch;
3204      FlagsVal |= Flag;
3205    }
3206  } else // No match for special register.
3207    return MatchOperand_NoMatch;
3208
3209  // Special register without flags is NOT equivalent to "fc" flags.
3210  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3211  // two lines would enable gas compatibility at the expense of breaking
3212  // round-tripping.
3213  //
3214  // if (!FlagsVal)
3215  //  FlagsVal = 0x9;
3216
3217  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3218  if (SpecReg == "spsr")
3219    FlagsVal |= 16;
3220
3221  Parser.Lex(); // Eat identifier token.
3222  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3223  return MatchOperand_Success;
3224}
3225
3226ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3227parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3228            int Low, int High) {
3229  const AsmToken &Tok = Parser.getTok();
3230  if (Tok.isNot(AsmToken::Identifier)) {
3231    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3232    return MatchOperand_ParseFail;
3233  }
3234  StringRef ShiftName = Tok.getString();
3235  std::string LowerOp = Op.lower();
3236  std::string UpperOp = Op.upper();
3237  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3238    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3239    return MatchOperand_ParseFail;
3240  }
3241  Parser.Lex(); // Eat shift type token.
3242
3243  // There must be a '#' and a shift amount.
3244  if (Parser.getTok().isNot(AsmToken::Hash) &&
3245      Parser.getTok().isNot(AsmToken::Dollar)) {
3246    Error(Parser.getTok().getLoc(), "'#' expected");
3247    return MatchOperand_ParseFail;
3248  }
3249  Parser.Lex(); // Eat hash token.
3250
3251  const MCExpr *ShiftAmount;
3252  SMLoc Loc = Parser.getTok().getLoc();
3253  if (getParser().ParseExpression(ShiftAmount)) {
3254    Error(Loc, "illegal expression");
3255    return MatchOperand_ParseFail;
3256  }
3257  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3258  if (!CE) {
3259    Error(Loc, "constant expression expected");
3260    return MatchOperand_ParseFail;
3261  }
3262  int Val = CE->getValue();
3263  if (Val < Low || Val > High) {
3264    Error(Loc, "immediate value out of range");
3265    return MatchOperand_ParseFail;
3266  }
3267
3268  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3269
3270  return MatchOperand_Success;
3271}
3272
3273ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3274parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3275  const AsmToken &Tok = Parser.getTok();
3276  SMLoc S = Tok.getLoc();
3277  if (Tok.isNot(AsmToken::Identifier)) {
3278    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3279    return MatchOperand_ParseFail;
3280  }
3281  int Val = StringSwitch<int>(Tok.getString())
3282    .Case("be", 1)
3283    .Case("le", 0)
3284    .Default(-1);
3285  Parser.Lex(); // Eat the token.
3286
3287  if (Val == -1) {
3288    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3289    return MatchOperand_ParseFail;
3290  }
3291  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3292                                                                  getContext()),
3293                                           S, Parser.getTok().getLoc()));
3294  return MatchOperand_Success;
3295}
3296
3297/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3298/// instructions. Legal values are:
3299///     lsl #n  'n' in [0,31]
3300///     asr #n  'n' in [1,32]
3301///             n == 32 encoded as n == 0.
3302ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3303parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3304  const AsmToken &Tok = Parser.getTok();
3305  SMLoc S = Tok.getLoc();
3306  if (Tok.isNot(AsmToken::Identifier)) {
3307    Error(S, "shift operator 'asr' or 'lsl' expected");
3308    return MatchOperand_ParseFail;
3309  }
3310  StringRef ShiftName = Tok.getString();
3311  bool isASR;
3312  if (ShiftName == "lsl" || ShiftName == "LSL")
3313    isASR = false;
3314  else if (ShiftName == "asr" || ShiftName == "ASR")
3315    isASR = true;
3316  else {
3317    Error(S, "shift operator 'asr' or 'lsl' expected");
3318    return MatchOperand_ParseFail;
3319  }
3320  Parser.Lex(); // Eat the operator.
3321
3322  // A '#' and a shift amount.
3323  if (Parser.getTok().isNot(AsmToken::Hash) &&
3324      Parser.getTok().isNot(AsmToken::Dollar)) {
3325    Error(Parser.getTok().getLoc(), "'#' expected");
3326    return MatchOperand_ParseFail;
3327  }
3328  Parser.Lex(); // Eat hash token.
3329
3330  const MCExpr *ShiftAmount;
3331  SMLoc E = Parser.getTok().getLoc();
3332  if (getParser().ParseExpression(ShiftAmount)) {
3333    Error(E, "malformed shift expression");
3334    return MatchOperand_ParseFail;
3335  }
3336  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3337  if (!CE) {
3338    Error(E, "shift amount must be an immediate");
3339    return MatchOperand_ParseFail;
3340  }
3341
3342  int64_t Val = CE->getValue();
3343  if (isASR) {
3344    // Shift amount must be in [1,32]
3345    if (Val < 1 || Val > 32) {
3346      Error(E, "'asr' shift amount must be in range [1,32]");
3347      return MatchOperand_ParseFail;
3348    }
3349    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3350    if (isThumb() && Val == 32) {
3351      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3352      return MatchOperand_ParseFail;
3353    }
3354    if (Val == 32) Val = 0;
3355  } else {
3356    // Shift amount must be in [1,32]
3357    if (Val < 0 || Val > 31) {
3358      Error(E, "'lsr' shift amount must be in range [0,31]");
3359      return MatchOperand_ParseFail;
3360    }
3361  }
3362
3363  E = Parser.getTok().getLoc();
3364  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3365
3366  return MatchOperand_Success;
3367}
3368
3369/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3370/// of instructions. Legal values are:
3371///     ror #n  'n' in {0, 8, 16, 24}
3372ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3373parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3374  const AsmToken &Tok = Parser.getTok();
3375  SMLoc S = Tok.getLoc();
3376  if (Tok.isNot(AsmToken::Identifier))
3377    return MatchOperand_NoMatch;
3378  StringRef ShiftName = Tok.getString();
3379  if (ShiftName != "ror" && ShiftName != "ROR")
3380    return MatchOperand_NoMatch;
3381  Parser.Lex(); // Eat the operator.
3382
3383  // A '#' and a rotate amount.
3384  if (Parser.getTok().isNot(AsmToken::Hash) &&
3385      Parser.getTok().isNot(AsmToken::Dollar)) {
3386    Error(Parser.getTok().getLoc(), "'#' expected");
3387    return MatchOperand_ParseFail;
3388  }
3389  Parser.Lex(); // Eat hash token.
3390
3391  const MCExpr *ShiftAmount;
3392  SMLoc E = Parser.getTok().getLoc();
3393  if (getParser().ParseExpression(ShiftAmount)) {
3394    Error(E, "malformed rotate expression");
3395    return MatchOperand_ParseFail;
3396  }
3397  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3398  if (!CE) {
3399    Error(E, "rotate amount must be an immediate");
3400    return MatchOperand_ParseFail;
3401  }
3402
3403  int64_t Val = CE->getValue();
3404  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
3405  // normally, zero is represented in asm by omitting the rotate operand
3406  // entirely.
3407  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3408    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3409    return MatchOperand_ParseFail;
3410  }
3411
3412  E = Parser.getTok().getLoc();
3413  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3414
3415  return MatchOperand_Success;
3416}
3417
/// parseBitfield - Parse a bitfield descriptor operand: "#<lsb>, #<width>",
/// with lsb in [0,31] and width in [1, 32-lsb]. Both values must be
/// constant expressions. On success a single Bitfield operand is pushed.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token (the old comment incorrectly said 'hash').
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  if (getParser().ParseExpression(WidthExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));

  return MatchOperand_Success;
}
3485
3486ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3487parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3488  // Check for a post-index addressing register operand. Specifically:
3489  // postidx_reg := '+' register {, shift}
3490  //              | '-' register {, shift}
3491  //              | register {, shift}
3492
3493  // This method must return MatchOperand_NoMatch without consuming any tokens
3494  // in the case where there is no match, as other alternatives take other
3495  // parse methods.
3496  AsmToken Tok = Parser.getTok();
3497  SMLoc S = Tok.getLoc();
3498  bool haveEaten = false;
3499  bool isAdd = true;
3500  int Reg = -1;
3501  if (Tok.is(AsmToken::Plus)) {
3502    Parser.Lex(); // Eat the '+' token.
3503    haveEaten = true;
3504  } else if (Tok.is(AsmToken::Minus)) {
3505    Parser.Lex(); // Eat the '-' token.
3506    isAdd = false;
3507    haveEaten = true;
3508  }
3509  if (Parser.getTok().is(AsmToken::Identifier))
3510    Reg = tryParseRegister();
3511  if (Reg == -1) {
3512    if (!haveEaten)
3513      return MatchOperand_NoMatch;
3514    Error(Parser.getTok().getLoc(), "register expected");
3515    return MatchOperand_ParseFail;
3516  }
3517  SMLoc E = Parser.getTok().getLoc();
3518
3519  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3520  unsigned ShiftImm = 0;
3521  if (Parser.getTok().is(AsmToken::Comma)) {
3522    Parser.Lex(); // Eat the ','.
3523    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3524      return MatchOperand_ParseFail;
3525  }
3526
3527  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3528                                                  ShiftImm, S, E));
3529
3530  return MatchOperand_Success;
3531}
3532
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat the '#'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
      return MatchOperand_ParseFail;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // NOTE(review): 'Tok' is a copy captured before the expression was
    // parsed, so E here is really the operand's *start* location — confirm
    // whether Parser.getTok().getLoc() (the end) was intended.
    SMLoc E = Tok.getLoc();
    // Negative zero is encoded as the flag value INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = INT32_MIN;

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }


  // Otherwise it must be a (possibly signed) register.
  bool haveEaten = false;
  bool isAdd = true;
  int Reg = -1;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    // No register: a clean no-match only if no sign token was consumed.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, E));

  return MatchOperand_Success;
}
3603
/// cvtT2LdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Operand layout: [1] pred, [2] Rt, [3] Rt2, [4] imm8s4-offset addr.
bool ARMAsmParser::
cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateReg(0));
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3621
/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Same layout as cvtT2LdrdPre, but the writeback placeholder comes first.
bool ARMAsmParser::
cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateReg(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3639
/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Operand layout: [1] pred, [2] Rt, [3] imm8-offset addr.
bool ARMAsmParser::
cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr, then pred
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3655
/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Store form: the writeback placeholder precedes Rt.
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, addr, pred
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3669
/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Operand layout: [1] pred, [2] Rt, [3] addrmode2 operand (3 MIOperands).
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr, then pred
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3685
/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Operand layout: [1] pred, [2] Rt, [3] imm12-offset addr.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr, then pred
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3701
3702
/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Store form: the writeback placeholder precedes Rt.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, addr, pred
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3716
/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Store form with an addrmode2 operand (3 MIOperands).
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, addr, pred
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3730
/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Store form with an addrmode3 operand (3 MIOperands).
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, addr, pred
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3744
/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Operand layout: [1] pred, [2] Rt, [3] base addr (no offset),
/// [4] post-index imm8 offset.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3763
/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Operand layout: [1] pred, [2] Rt, [3] base addr (no offset),
/// [4] post-index register offset (2 MIOperands).
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3782
/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Store form: the writeback placeholder precedes Rt.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3801
/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Store form with a post-index register offset (2 MIOperands).
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3820
/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// ARM-mode counterpart of cvtT2LdrdPre, with an addrmode3 address.
/// (Note: this uses CreateImm(0) for the placeholder where the T2 variant
/// uses CreateReg(0).)
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3838
/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// ARM-mode store counterpart: the writeback placeholder precedes Rt/Rt2.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3856
/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// Operand layout: [1] pred, [2] Rt, [3] addrmode3 operand (3 MIOperands).
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr, then pred
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3870
/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// (Fixed the "cvtThumbMultiple" typo in the old comment.)
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand. In the three-operand form (size == 6) reject the instruction
  // if Rd matches neither source register.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false; // Signal conversion failure.
  }
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Repeat the destination register (operand 0) as the tied second source.
  Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}
3903
/// cvtVLDwbFixed - Convert a parsed writeback VLD (fixed-increment form)
/// to an MCInst. Operand layout: [1] pred, [3] vector list, [4] aligned mem.
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3917
/// cvtVLDwbRegister - Convert a parsed writeback VLD (register-increment
/// form) to an MCInst. Layout: [1] pred, [3] vector list, [4] aligned mem,
/// [5] increment register Vm.
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3933
/// cvtVSTwbFixed - Convert a parsed writeback VST (fixed-increment form)
/// to an MCInst. Unlike the VLD forms, the memory operand precedes the
/// vector list in the MCInst.
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3947
/// cvtVSTwbRegister - Convert a parsed writeback VST (register-increment
/// form) to an MCInst. Layout: [1] pred, [3] vector list, [4] aligned mem,
/// [5] increment register Vm.
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true; // Conversion succeeded.
}
3963
3964/// Parse an ARM memory expression, return false if successful else return true
3965/// or an error.  The first token must be a '[' when called.
3966bool ARMAsmParser::
3967parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3968  SMLoc S, E;
3969  assert(Parser.getTok().is(AsmToken::LBrac) &&
3970         "Token is not a Left Bracket");
3971  S = Parser.getTok().getLoc();
3972  Parser.Lex(); // Eat left bracket token.
3973
3974  const AsmToken &BaseRegTok = Parser.getTok();
3975  int BaseRegNum = tryParseRegister();
3976  if (BaseRegNum == -1)
3977    return Error(BaseRegTok.getLoc(), "register expected");
3978
3979  // The next token must either be a comma or a closing bracket.
3980  const AsmToken &Tok = Parser.getTok();
3981  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3982    return Error(Tok.getLoc(), "malformed memory operand");
3983
3984  if (Tok.is(AsmToken::RBrac)) {
3985    E = Tok.getLoc();
3986    Parser.Lex(); // Eat right bracket token.
3987
3988    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3989                                             0, 0, false, S, E));
3990
3991    // If there's a pre-indexing writeback marker, '!', just add it as a token
3992    // operand. It's rather odd, but syntactically valid.
3993    if (Parser.getTok().is(AsmToken::Exclaim)) {
3994      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3995      Parser.Lex(); // Eat the '!'.
3996    }
3997
3998    return false;
3999  }
4000
4001  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4002  Parser.Lex(); // Eat the comma.
4003
4004  // If we have a ':', it's an alignment specifier.
4005  if (Parser.getTok().is(AsmToken::Colon)) {
4006    Parser.Lex(); // Eat the ':'.
4007    E = Parser.getTok().getLoc();
4008
4009    const MCExpr *Expr;
4010    if (getParser().ParseExpression(Expr))
4011     return true;
4012
4013    // The expression has to be a constant. Memory references with relocations
4014    // don't come through here, as they use the <label> forms of the relevant
4015    // instructions.
4016    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4017    if (!CE)
4018      return Error (E, "constant expression expected");
4019
4020    unsigned Align = 0;
4021    switch (CE->getValue()) {
4022    default:
4023      return Error(E,
4024                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4025    case 16:  Align = 2; break;
4026    case 32:  Align = 4; break;
4027    case 64:  Align = 8; break;
4028    case 128: Align = 16; break;
4029    case 256: Align = 32; break;
4030    }
4031
4032    // Now we should have the closing ']'
4033    E = Parser.getTok().getLoc();
4034    if (Parser.getTok().isNot(AsmToken::RBrac))
4035      return Error(E, "']' expected");
4036    Parser.Lex(); // Eat right bracket token.
4037
4038    // Don't worry about range checking the value here. That's handled by
4039    // the is*() predicates.
4040    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4041                                             ARM_AM::no_shift, 0, Align,
4042                                             false, S, E));
4043
4044    // If there's a pre-indexing writeback marker, '!', just add it as a token
4045    // operand.
4046    if (Parser.getTok().is(AsmToken::Exclaim)) {
4047      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4048      Parser.Lex(); // Eat the '!'.
4049    }
4050
4051    return false;
4052  }
4053
4054  // If we have a '#', it's an immediate offset, else assume it's a register
4055  // offset. Be friendly and also accept a plain integer (without a leading
4056  // hash) for gas compatibility.
4057  if (Parser.getTok().is(AsmToken::Hash) ||
4058      Parser.getTok().is(AsmToken::Dollar) ||
4059      Parser.getTok().is(AsmToken::Integer)) {
4060    if (Parser.getTok().isNot(AsmToken::Integer))
4061      Parser.Lex(); // Eat the '#'.
4062    E = Parser.getTok().getLoc();
4063
4064    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4065    const MCExpr *Offset;
4066    if (getParser().ParseExpression(Offset))
4067     return true;
4068
4069    // The expression has to be a constant. Memory references with relocations
4070    // don't come through here, as they use the <label> forms of the relevant
4071    // instructions.
4072    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4073    if (!CE)
4074      return Error (E, "constant expression expected");
4075
4076    // If the constant was #-0, represent it as INT32_MIN.
4077    int32_t Val = CE->getValue();
4078    if (isNegative && Val == 0)
4079      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4080
4081    // Now we should have the closing ']'
4082    E = Parser.getTok().getLoc();
4083    if (Parser.getTok().isNot(AsmToken::RBrac))
4084      return Error(E, "']' expected");
4085    Parser.Lex(); // Eat right bracket token.
4086
4087    // Don't worry about range checking the value here. That's handled by
4088    // the is*() predicates.
4089    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4090                                             ARM_AM::no_shift, 0, 0,
4091                                             false, S, E));
4092
4093    // If there's a pre-indexing writeback marker, '!', just add it as a token
4094    // operand.
4095    if (Parser.getTok().is(AsmToken::Exclaim)) {
4096      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4097      Parser.Lex(); // Eat the '!'.
4098    }
4099
4100    return false;
4101  }
4102
4103  // The register offset is optionally preceded by a '+' or '-'
4104  bool isNegative = false;
4105  if (Parser.getTok().is(AsmToken::Minus)) {
4106    isNegative = true;
4107    Parser.Lex(); // Eat the '-'.
4108  } else if (Parser.getTok().is(AsmToken::Plus)) {
4109    // Nothing to do.
4110    Parser.Lex(); // Eat the '+'.
4111  }
4112
4113  E = Parser.getTok().getLoc();
4114  int OffsetRegNum = tryParseRegister();
4115  if (OffsetRegNum == -1)
4116    return Error(E, "register expected");
4117
4118  // If there's a shift operator, handle it.
4119  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4120  unsigned ShiftImm = 0;
4121  if (Parser.getTok().is(AsmToken::Comma)) {
4122    Parser.Lex(); // Eat the ','.
4123    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4124      return true;
4125  }
4126
4127  // Now we should have the closing ']'
4128  E = Parser.getTok().getLoc();
4129  if (Parser.getTok().isNot(AsmToken::RBrac))
4130    return Error(E, "']' expected");
4131  Parser.Lex(); // Eat right bracket token.
4132
4133  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4134                                           ShiftType, ShiftImm, 0, isNegative,
4135                                           S, E));
4136
4137  // If there's a pre-indexing writeback marker, '!', just add it as a token
4138  // operand.
4139  if (Parser.getTok().is(AsmToken::Exclaim)) {
4140    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4141    Parser.Lex(); // Eat the '!'.
4142  }
4143
4144  return false;
4145}
4146
4147/// parseMemRegOffsetShift - one of these two:
4148///   ( lsl | lsr | asr | ror ) , # shift_amount
4149///   rrx
4150/// return true if it parses a shift otherwise it returns false.
4151bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4152                                          unsigned &Amount) {
4153  SMLoc Loc = Parser.getTok().getLoc();
4154  const AsmToken &Tok = Parser.getTok();
4155  if (Tok.isNot(AsmToken::Identifier))
4156    return true;
4157  StringRef ShiftName = Tok.getString();
4158  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4159      ShiftName == "asl" || ShiftName == "ASL")
4160    St = ARM_AM::lsl;
4161  else if (ShiftName == "lsr" || ShiftName == "LSR")
4162    St = ARM_AM::lsr;
4163  else if (ShiftName == "asr" || ShiftName == "ASR")
4164    St = ARM_AM::asr;
4165  else if (ShiftName == "ror" || ShiftName == "ROR")
4166    St = ARM_AM::ror;
4167  else if (ShiftName == "rrx" || ShiftName == "RRX")
4168    St = ARM_AM::rrx;
4169  else
4170    return Error(Loc, "illegal shift operator");
4171  Parser.Lex(); // Eat shift type token.
4172
4173  // rrx stands alone.
4174  Amount = 0;
4175  if (St != ARM_AM::rrx) {
4176    Loc = Parser.getTok().getLoc();
4177    // A '#' and a shift amount.
4178    const AsmToken &HashTok = Parser.getTok();
4179    if (HashTok.isNot(AsmToken::Hash) &&
4180        HashTok.isNot(AsmToken::Dollar))
4181      return Error(HashTok.getLoc(), "'#' expected");
4182    Parser.Lex(); // Eat hash token.
4183
4184    const MCExpr *Expr;
4185    if (getParser().ParseExpression(Expr))
4186      return true;
4187    // Range check the immediate.
4188    // lsl, ror: 0 <= imm <= 31
4189    // lsr, asr: 0 <= imm <= 32
4190    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4191    if (!CE)
4192      return Error(Loc, "shift amount must be an immediate");
4193    int64_t Imm = CE->getValue();
4194    if (Imm < 0 ||
4195        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4196        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4197      return Error(Loc, "immediate shift value out of range");
4198    Amount = Imm;
4199  }
4200
4201  return false;
4202}
4203
4204/// parseFPImm - A floating point immediate expression operand.
4205ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4206parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4207  SMLoc S = Parser.getTok().getLoc();
4208
4209  if (Parser.getTok().isNot(AsmToken::Hash) &&
4210      Parser.getTok().isNot(AsmToken::Dollar))
4211    return MatchOperand_NoMatch;
4212
4213  // Disambiguate the VMOV forms that can accept an FP immediate.
4214  // vmov.f32 <sreg>, #imm
4215  // vmov.f64 <dreg>, #imm
4216  // vmov.f32 <dreg>, #imm  @ vector f32x2
4217  // vmov.f32 <qreg>, #imm  @ vector f32x4
4218  //
4219  // There are also the NEON VMOV instructions which expect an
4220  // integer constant. Make sure we don't try to parse an FPImm
4221  // for these:
4222  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4223  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4224  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4225                           TyOp->getToken() != ".f64"))
4226    return MatchOperand_NoMatch;
4227
4228  Parser.Lex(); // Eat the '#'.
4229
4230  // Handle negation, as that still comes through as a separate token.
4231  bool isNegative = false;
4232  if (Parser.getTok().is(AsmToken::Minus)) {
4233    isNegative = true;
4234    Parser.Lex();
4235  }
4236  const AsmToken &Tok = Parser.getTok();
4237  if (Tok.is(AsmToken::Real)) {
4238    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4239    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4240    // If we had a '-' in front, toggle the sign bit.
4241    IntVal ^= (uint64_t)isNegative << 63;
4242    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4243    Parser.Lex(); // Eat the token.
4244    if (Val == -1) {
4245      TokError("floating point value out of range");
4246      return MatchOperand_ParseFail;
4247    }
4248    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4249    return MatchOperand_Success;
4250  }
4251  if (Tok.is(AsmToken::Integer)) {
4252    int64_t Val = Tok.getIntVal();
4253    Parser.Lex(); // Eat the token.
4254    if (Val > 255 || Val < 0) {
4255      TokError("encoded floating point value out of range");
4256      return MatchOperand_ParseFail;
4257    }
4258    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4259    return MatchOperand_Success;
4260  }
4261
4262  TokError("invalid floating point immediate");
4263  return MatchOperand_ParseFail;
4264}
4265/// Parse a arm instruction operand.  For now this parses the operand regardless
4266/// of the mnemonic.
4267bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4268                                StringRef Mnemonic) {
4269  SMLoc S, E;
4270
4271  // Check if the current operand has a custom associated parser, if so, try to
4272  // custom parse the operand, or fallback to the general approach.
4273  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4274  if (ResTy == MatchOperand_Success)
4275    return false;
4276  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4277  // there was a match, but an error occurred, in which case, just return that
4278  // the operand parsing failed.
4279  if (ResTy == MatchOperand_ParseFail)
4280    return true;
4281
4282  switch (getLexer().getKind()) {
4283  default:
4284    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4285    return true;
4286  case AsmToken::Identifier: {
4287    if (!tryParseRegisterWithWriteBack(Operands))
4288      return false;
4289    int Res = tryParseShiftRegister(Operands);
4290    if (Res == 0) // success
4291      return false;
4292    else if (Res == -1) // irrecoverable error
4293      return true;
4294    // If this is VMRS, check for the apsr_nzcv operand.
4295    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4296      S = Parser.getTok().getLoc();
4297      Parser.Lex();
4298      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4299      return false;
4300    }
4301
4302    // Fall though for the Identifier case that is not a register or a
4303    // special name.
4304  }
4305  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4306  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4307  case AsmToken::String:  // quoted label names.
4308  case AsmToken::Dot: {   // . as a branch target
4309    // This was not a register so parse other operands that start with an
4310    // identifier (like labels) as expressions and create them as immediates.
4311    const MCExpr *IdVal;
4312    S = Parser.getTok().getLoc();
4313    if (getParser().ParseExpression(IdVal))
4314      return true;
4315    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4316    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4317    return false;
4318  }
4319  case AsmToken::LBrac:
4320    return parseMemory(Operands);
4321  case AsmToken::LCurly:
4322    return parseRegisterList(Operands);
4323  case AsmToken::Dollar:
4324  case AsmToken::Hash: {
4325    // #42 -> immediate.
4326    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4327    S = Parser.getTok().getLoc();
4328    Parser.Lex();
4329    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4330    const MCExpr *ImmVal;
4331    if (getParser().ParseExpression(ImmVal))
4332      return true;
4333    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4334    if (CE) {
4335      int32_t Val = CE->getValue();
4336      if (isNegative && Val == 0)
4337        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4338    }
4339    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4340    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4341    return false;
4342  }
4343  case AsmToken::Colon: {
4344    // ":lower16:" and ":upper16:" expression prefixes
4345    // FIXME: Check it's an expression prefix,
4346    // e.g. (FOO - :lower16:BAR) isn't legal.
4347    ARMMCExpr::VariantKind RefKind;
4348    if (parsePrefix(RefKind))
4349      return true;
4350
4351    const MCExpr *SubExprVal;
4352    if (getParser().ParseExpression(SubExprVal))
4353      return true;
4354
4355    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4356                                                   getContext());
4357    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4358    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4359    return false;
4360  }
4361  }
4362}
4363
4364// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4365//  :lower16: and :upper16:.
4366bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4367  RefKind = ARMMCExpr::VK_ARM_None;
4368
4369  // :lower16: and :upper16: modifiers
4370  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4371  Parser.Lex(); // Eat ':'
4372
4373  if (getLexer().isNot(AsmToken::Identifier)) {
4374    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4375    return true;
4376  }
4377
4378  StringRef IDVal = Parser.getTok().getIdentifier();
4379  if (IDVal == "lower16") {
4380    RefKind = ARMMCExpr::VK_ARM_LO16;
4381  } else if (IDVal == "upper16") {
4382    RefKind = ARMMCExpr::VK_ARM_HI16;
4383  } else {
4384    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4385    return true;
4386  }
4387  Parser.Lex();
4388
4389  if (getLexer().isNot(AsmToken::Colon)) {
4390    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4391    return true;
4392  }
4393  Parser.Lex(); // Eat the last ':'
4394  return false;
4395}
4396
/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// On return: the returned StringRef is the mnemonic with recognized suffixes
/// stripped; \p PredicationCode holds the condition code (ARMCC::AL if none);
/// \p CarrySetting is true if a flag-setting 's' was stripped;
/// \p ProcessorIMod holds the CPS interrupt-mode code ("ie"/"id") or 0; and
/// \p ITMask holds the raw "t"/"e" mask characters of an IT instruction.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // Note: StringRef::substr clamps its start offset, so mnemonics shorter
  // than two characters safely produce an empty suffix (no match) here.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      // A condition suffix was found; strip it from the mnemonic.
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  // The mask characters are validated later, in ParseInstruction.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}
4495
4496/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4497/// inclusion of carry set or predication code operands.
4498//
4499// FIXME: It would be nice to autogen this.
4500void ARMAsmParser::
4501getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4502                      bool &CanAcceptPredicationCode) {
4503  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4504      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4505      Mnemonic == "add" || Mnemonic == "adc" ||
4506      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4507      Mnemonic == "orr" || Mnemonic == "mvn" ||
4508      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4509      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4510      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4511                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4512                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4513    CanAcceptCarrySet = true;
4514  } else
4515    CanAcceptCarrySet = false;
4516
4517  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4518      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4519      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4520      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4521      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4522      (Mnemonic == "clrex" && !isThumb()) ||
4523      (Mnemonic == "nop" && isThumbOne()) ||
4524      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4525        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4526        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4527      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4528       !isThumb()) ||
4529      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4530    CanAcceptPredicationCode = false;
4531  } else
4532    CanAcceptPredicationCode = true;
4533
4534  if (isThumb()) {
4535    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4536        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4537      CanAcceptPredicationCode = false;
4538  }
4539}
4540
/// shouldOmitCCOutOperand - Return true if the defaulted (non-flag-setting)
/// cc_out operand that was added to \p Operands during mnemonic processing
/// should be removed before instruction matching. This is needed for
/// instruction variants (MOVW, various 16-bit Thumb encodings, etc.) that
/// have no cc_out operand at all, which can only be determined after the
/// explicit operands have been parsed. Operands[1] is the cc_out operand;
/// a register value of 0 there means "not setting flags".
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs  or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // No special case applied: keep the cc_out operand.
  return false;
}
4665
4666static bool isDataTypeToken(StringRef Tok) {
4667  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4668    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4669    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4670    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4671    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4672    Tok == ".f" || Tok == ".d";
4673}
4674
4675// FIXME: This bit should probably be handled via an explicit match class
4676// in the .td files that matches the suffix instead of having it be
4677// a literal string token the way it is now.
4678static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4679  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4680}
4681
4682static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4683/// Parse an arm instruction mnemonic followed by its operands.
4684bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4685                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4686  // Apply mnemonic aliases before doing anything else, as the destination
4687  // mnemnonic may include suffices and we want to handle them normally.
4688  // The generic tblgen'erated code does this later, at the start of
4689  // MatchInstructionImpl(), but that's too late for aliases that include
4690  // any sort of suffix.
4691  unsigned AvailableFeatures = getAvailableFeatures();
4692  applyMnemonicAliases(Name, AvailableFeatures);
4693
4694  // First check for the ARM-specific .req directive.
4695  if (Parser.getTok().is(AsmToken::Identifier) &&
4696      Parser.getTok().getIdentifier() == ".req") {
4697    parseDirectiveReq(Name, NameLoc);
4698    // We always return 'error' for this, as we're done with this
4699    // statement and don't need to match the 'instruction."
4700    return true;
4701  }
4702
4703  // Create the leading tokens for the mnemonic, split by '.' characters.
4704  size_t Start = 0, Next = Name.find('.');
4705  StringRef Mnemonic = Name.slice(Start, Next);
4706
4707  // Split out the predication code and carry setting flag from the mnemonic.
4708  unsigned PredicationCode;
4709  unsigned ProcessorIMod;
4710  bool CarrySetting;
4711  StringRef ITMask;
4712  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4713                           ProcessorIMod, ITMask);
4714
4715  // In Thumb1, only the branch (B) instruction can be predicated.
4716  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4717    Parser.EatToEndOfStatement();
4718    return Error(NameLoc, "conditional execution not supported in Thumb1");
4719  }
4720
4721  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4722
4723  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4724  // is the mask as it will be for the IT encoding if the conditional
4725  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
4726  // where the conditional bit0 is zero, the instruction post-processing
4727  // will adjust the mask accordingly.
4728  if (Mnemonic == "it") {
4729    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4730    if (ITMask.size() > 3) {
4731      Parser.EatToEndOfStatement();
4732      return Error(Loc, "too many conditions on IT instruction");
4733    }
4734    unsigned Mask = 8;
4735    for (unsigned i = ITMask.size(); i != 0; --i) {
4736      char pos = ITMask[i - 1];
4737      if (pos != 't' && pos != 'e') {
4738        Parser.EatToEndOfStatement();
4739        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4740      }
4741      Mask >>= 1;
4742      if (ITMask[i - 1] == 't')
4743        Mask |= 8;
4744    }
4745    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4746  }
4747
4748  // FIXME: This is all a pretty gross hack. We should automatically handle
4749  // optional operands like this via tblgen.
4750
4751  // Next, add the CCOut and ConditionCode operands, if needed.
4752  //
4753  // For mnemonics which can ever incorporate a carry setting bit or predication
4754  // code, our matching model involves us always generating CCOut and
4755  // ConditionCode operands to match the mnemonic "as written" and then we let
4756  // the matcher deal with finding the right instruction or generating an
4757  // appropriate error.
4758  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4759  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4760
4761  // If we had a carry-set on an instruction that can't do that, issue an
4762  // error.
4763  if (!CanAcceptCarrySet && CarrySetting) {
4764    Parser.EatToEndOfStatement();
4765    return Error(NameLoc, "instruction '" + Mnemonic +
4766                 "' can not set flags, but 's' suffix specified");
4767  }
4768  // If we had a predication code on an instruction that can't do that, issue an
4769  // error.
4770  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4771    Parser.EatToEndOfStatement();
4772    return Error(NameLoc, "instruction '" + Mnemonic +
4773                 "' is not predicable, but condition code specified");
4774  }
4775
4776  // Add the carry setting operand, if necessary.
4777  if (CanAcceptCarrySet) {
4778    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4779    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4780                                               Loc));
4781  }
4782
4783  // Add the predication code operand, if necessary.
4784  if (CanAcceptPredicationCode) {
4785    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4786                                      CarrySetting);
4787    Operands.push_back(ARMOperand::CreateCondCode(
4788                         ARMCC::CondCodes(PredicationCode), Loc));
4789  }
4790
4791  // Add the processor imod operand, if necessary.
4792  if (ProcessorIMod) {
4793    Operands.push_back(ARMOperand::CreateImm(
4794          MCConstantExpr::Create(ProcessorIMod, getContext()),
4795                                 NameLoc, NameLoc));
4796  }
4797
4798  // Add the remaining tokens in the mnemonic.
4799  while (Next != StringRef::npos) {
4800    Start = Next;
4801    Next = Name.find('.', Start + 1);
4802    StringRef ExtraToken = Name.slice(Start, Next);
4803
4804    // Some NEON instructions have an optional datatype suffix that is
4805    // completely ignored. Check for that.
4806    if (isDataTypeToken(ExtraToken) &&
4807        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4808      continue;
4809
4810    if (ExtraToken != ".n") {
4811      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4812      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4813    }
4814  }
4815
4816  // Read the remaining operands.
4817  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4818    // Read the first operand.
4819    if (parseOperand(Operands, Mnemonic)) {
4820      Parser.EatToEndOfStatement();
4821      return true;
4822    }
4823
4824    while (getLexer().is(AsmToken::Comma)) {
4825      Parser.Lex();  // Eat the comma.
4826
4827      // Parse and remember the operand.
4828      if (parseOperand(Operands, Mnemonic)) {
4829        Parser.EatToEndOfStatement();
4830        return true;
4831      }
4832    }
4833  }
4834
4835  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4836    SMLoc Loc = getLexer().getLoc();
4837    Parser.EatToEndOfStatement();
4838    return Error(Loc, "unexpected token in argument list");
4839  }
4840
4841  Parser.Lex(); // Consume the EndOfStatement
4842
4843  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4844  // do and don't have a cc_out optional-def operand. With some spot-checks
4845  // of the operand list, we can figure out which variant we're trying to
4846  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
4848  // mnemonic, of course (CarrySetting == true). Reason number #317 the
4849  // table driven matcher doesn't fit well with the ARM instruction set.
4850  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4851    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4852    Operands.erase(Operands.begin() + 1);
4853    delete Op;
4854  }
4855
4856  // ARM mode 'blx' need special handling, as the register operand version
4857  // is predicable, but the label operand version is not. So, we can't rely
4858  // on the Mnemonic based checking to correctly figure out when to put
4859  // a k_CondCode operand in the list. If we're trying to match the label
4860  // version, remove the k_CondCode operand here.
4861  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4862      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4863    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4864    Operands.erase(Operands.begin() + 1);
4865    delete Op;
4866  }
4867
4868  // The vector-compare-to-zero instructions have a literal token "#0" at
4869  // the end that comes to here as an immediate operand. Convert it to a
4870  // token to play nicely with the matcher.
4871  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4872      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4873      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4874    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4875    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4876    if (CE && CE->getValue() == 0) {
4877      Operands.erase(Operands.begin() + 5);
4878      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4879      delete Op;
4880    }
4881  }
4882  // VCMP{E} does the same thing, but with a different operand count.
4883  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4884      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4885    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4886    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4887    if (CE && CE->getValue() == 0) {
4888      Operands.erase(Operands.begin() + 4);
4889      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4890      delete Op;
4891    }
4892  }
4893  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4894  // end. Convert it to a token here. Take care not to convert those
4895  // that should hit the Thumb2 encoding.
4896  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4897      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4898      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4899      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4900    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4901    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4902    if (CE && CE->getValue() == 0 &&
4903        (isThumbOne() ||
4904         // The cc_out operand matches the IT block.
4905         ((inITBlock() != CarrySetting) &&
4906         // Neither register operand is a high register.
4907         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4908          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4909      Operands.erase(Operands.begin() + 5);
4910      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4911      delete Op;
4912    }
4913  }
4914
4915  return false;
4916}
4917
4918// Validate context-sensitive operand constraints.
4919
4920// return 'true' if register list contains non-low GPR registers,
4921// 'false' otherwise. If Reg is in the register list or is HiReg, set
4922// 'containsReg' to true.
4923static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4924                                 unsigned HiReg, bool &containsReg) {
4925  containsReg = false;
4926  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4927    unsigned OpReg = Inst.getOperand(i).getReg();
4928    if (OpReg == Reg)
4929      containsReg = true;
4930    // Anything other than a low register isn't legal here.
4931    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4932      return true;
4933  }
4934  return false;
4935}
4936
4937// Check if the specified regisgter is in the register list of the inst,
4938// starting at the indicated operand number.
4939static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4940  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4941    unsigned OpReg = Inst.getOperand(i).getReg();
4942    if (OpReg == Reg)
4943      return true;
4944  }
4945  return false;
4946}
4947
// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
// Tablegen-emitted array of instruction descriptions, indexed by opcode.
extern const MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for the given opcode in the ARMInsts table.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}
4957
4958// FIXME: We would really like to be able to tablegen'erate this.
// Enforce context-sensitive constraints the table-driven matcher cannot
// express: IT-block predication rules, sequential-register-pair
// requirements, bitfield ranges, and Thumb register-list restrictions.
// Returns true (after emitting a diagnostic via Error) when the
// instruction is rejected, false when it is acceptable.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable.  It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    unsigned bit = 1;
    // The first instruction in the block uses the IT condition directly;
    // later slots read their then/else bit out of the IT mask.
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    // A 'then' slot must carry the IT condition; an 'else' slot its inverse.
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1.
    // NOTE(review): the pair is read from operands 1 and 2 here (vs 0 and 1
    // above) — presumably operand 0 is the writeback/status def; verify
    // against the instruction definitions.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    // Writeback load must not also load the base register.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}
5107
// Map one of the pseudo "...Asm..." VST1LN/VST2LN opcodes (used to match
// the assembly-level syntax) to the real instruction opcode, and set
// 'Spacing' to the stride between consecutive registers in the list:
// 1 for the D-register forms, 2 for the spaced Q-register forms.
static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
  case ARM::VST1LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
  case ARM::VST1LNdAsm_U8:
    Spacing = 1;
    return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16:
    Spacing = 1;
    return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
    Spacing = 1;
    return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
  case ARM::VST2LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
  case ARM::VST2LNqWB_fixed_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
  case ARM::VST2LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
  case ARM::VST2LNqWB_register_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
  case ARM::VST2LNdAsm_U8:
    Spacing = 1;
    return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16:
    Spacing = 1;
    return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
    Spacing = 1;
    return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
  case ARM::VST2LNqAsm_U16:
    Spacing = 2;
    return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
    Spacing = 2;
    return ARM::VST2LNq32;
  }
}
5238
5239static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
5240  switch(Opc) {
5241  default: assert(0 && "unexpected opcode!");
5242  // VLD1LN
5243  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5244  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5245  case ARM::VLD1LNdWB_fixed_Asm_U8:
5246    Spacing = 1;
5247    return ARM::VLD1LNd8_UPD;
5248  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5249  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5250  case ARM::VLD1LNdWB_fixed_Asm_U16:
5251    Spacing = 1;
5252    return ARM::VLD1LNd16_UPD;
5253  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5254  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5255  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5256    Spacing = 1;
5257    return ARM::VLD1LNd32_UPD;
5258  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5259  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5260  case ARM::VLD1LNdWB_register_Asm_U8:
5261    Spacing = 1;
5262    return ARM::VLD1LNd8_UPD;
5263  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5264  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5265  case ARM::VLD1LNdWB_register_Asm_U16:
5266    Spacing = 1;
5267    return ARM::VLD1LNd16_UPD;
5268  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5269  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5270  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5271    Spacing = 1;
5272    return ARM::VLD1LNd32_UPD;
5273  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5274  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5275  case ARM::VLD1LNdAsm_U8:
5276    Spacing = 1;
5277    return ARM::VLD1LNd8;
5278  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5279  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5280  case ARM::VLD1LNdAsm_U16:
5281    Spacing = 1;
5282    return ARM::VLD1LNd16;
5283  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5284  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5285  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5286    Spacing = 1;
5287    return ARM::VLD1LNd32;
5288
5289  // VLD2LN
5290  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5291  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5292  case ARM::VLD2LNdWB_fixed_Asm_U8:
5293    Spacing = 1;
5294    return ARM::VLD2LNd8_UPD;
5295  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5296  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5297  case ARM::VLD2LNdWB_fixed_Asm_U16:
5298    Spacing = 1;
5299    return ARM::VLD2LNd16_UPD;
5300  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5301  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5302  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5303    Spacing = 1;
5304    return ARM::VLD2LNd32_UPD;
5305  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5306  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5307  case ARM::VLD2LNqWB_fixed_Asm_U16:
5308    Spacing = 1;
5309    return ARM::VLD2LNq16_UPD;
5310  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
5311  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
5312  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
5313    Spacing = 2;
5314    return ARM::VLD2LNq32_UPD;
5315  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5316  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5317  case ARM::VLD2LNdWB_register_Asm_U8:
5318    Spacing = 1;
5319    return ARM::VLD2LNd8_UPD;
5320  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5321  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5322  case ARM::VLD2LNdWB_register_Asm_U16:
5323    Spacing = 1;
5324    return ARM::VLD2LNd16_UPD;
5325  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5326  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5327  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5328    Spacing = 1;
5329    return ARM::VLD2LNd32_UPD;
5330  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5331  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5332  case ARM::VLD2LNqWB_register_Asm_U16:
5333    Spacing = 2;
5334    return ARM::VLD2LNq16_UPD;
5335  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
5336  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
5337  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
5338    Spacing = 2;
5339    return ARM::VLD2LNq32_UPD;
5340  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5341  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5342  case ARM::VLD2LNdAsm_U8:
5343    Spacing = 1;
5344    return ARM::VLD2LNd8;
5345  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5346  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5347  case ARM::VLD2LNdAsm_U16:
5348    Spacing = 1;
5349    return ARM::VLD2LNd16;
5350  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5351  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5352  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5353    Spacing = 1;
5354    return ARM::VLD2LNd32;
5355  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5356  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
5357  case ARM::VLD2LNqAsm_U16:
5358    Spacing = 2;
5359    return ARM::VLD2LNq16;
5360  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
5361  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
5362  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
5363    Spacing = 2;
5364    return ARM::VLD2LNq32;
5365  }
5366}
5367
5368bool ARMAsmParser::
5369processInstruction(MCInst &Inst,
5370                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5371  switch (Inst.getOpcode()) {
5372  // Handle NEON VST complex aliases.
5373  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5374  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5375  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5376  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5377  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5378  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5379  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5380  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5381    MCInst TmpInst;
5382    // Shuffle the operands around so the lane index operand is in the
5383    // right place.
5384    unsigned Spacing;
5385    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5386    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5387    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5388    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5389    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5390    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5391    TmpInst.addOperand(Inst.getOperand(1)); // lane
5392    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5393    TmpInst.addOperand(Inst.getOperand(6));
5394    Inst = TmpInst;
5395    return true;
5396  }
5397
5398  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
5399  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
5400  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
5401  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5402  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5403  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5404  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5405  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5406  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5407  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5408  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
5409  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
5410  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
5411  case ARM::VST2LNqWB_register_Asm_U32: {
5412    MCInst TmpInst;
5413    // Shuffle the operands around so the lane index operand is in the
5414    // right place.
5415    unsigned Spacing;
5416    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5417    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5418    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5419    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5420    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5421    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5422    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5423                                            Spacing));
5424    TmpInst.addOperand(Inst.getOperand(1)); // lane
5425    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5426    TmpInst.addOperand(Inst.getOperand(6));
5427    Inst = TmpInst;
5428    return true;
5429  }
5430  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5431  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5432  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5433  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5434  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5435  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5436  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5437  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5438    MCInst TmpInst;
5439    // Shuffle the operands around so the lane index operand is in the
5440    // right place.
5441    unsigned Spacing;
5442    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5443    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5444    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5445    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5446    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5447    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5448    TmpInst.addOperand(Inst.getOperand(1)); // lane
5449    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5450    TmpInst.addOperand(Inst.getOperand(5));
5451    Inst = TmpInst;
5452    return true;
5453  }
5454
5455  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
5456  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
5457  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
5458  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5459  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5460  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5461  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5462  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5463  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5464  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5465  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
5466  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
5467  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
5468  case ARM::VST2LNqWB_fixed_Asm_U32: {
5469    MCInst TmpInst;
5470    // Shuffle the operands around so the lane index operand is in the
5471    // right place.
5472    unsigned Spacing;
5473    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5474    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5475    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5476    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5477    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5478    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5479    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5480                                            Spacing));
5481    TmpInst.addOperand(Inst.getOperand(1)); // lane
5482    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5483    TmpInst.addOperand(Inst.getOperand(5));
5484    Inst = TmpInst;
5485    return true;
5486  }
5487  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5488  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5489  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5490  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5491  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5492  case ARM::VST1LNdAsm_U32: {
5493    MCInst TmpInst;
5494    // Shuffle the operands around so the lane index operand is in the
5495    // right place.
5496    unsigned Spacing;
5497    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5498    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5499    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5500    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5501    TmpInst.addOperand(Inst.getOperand(1)); // lane
5502    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5503    TmpInst.addOperand(Inst.getOperand(5));
5504    Inst = TmpInst;
5505    return true;
5506  }
5507
5508  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
5509  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
5510  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5511  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5512  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5513  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5514  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
5515  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
5516  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
5517    MCInst TmpInst;
5518    // Shuffle the operands around so the lane index operand is in the
5519    // right place.
5520    unsigned Spacing;
5521    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5522    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5523    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5524    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5525    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5526                                            Spacing));
5527    TmpInst.addOperand(Inst.getOperand(1)); // lane
5528    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5529    TmpInst.addOperand(Inst.getOperand(5));
5530    Inst = TmpInst;
5531    return true;
5532  }
5533  // Handle NEON VLD complex aliases.
5534  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5535  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5536  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5537  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5538  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5539  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5540  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5541  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5542    MCInst TmpInst;
5543    // Shuffle the operands around so the lane index operand is in the
5544    // right place.
5545    unsigned Spacing;
5546    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5547    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5548    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5549    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5550    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5551    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5552    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5553    TmpInst.addOperand(Inst.getOperand(1)); // lane
5554    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5555    TmpInst.addOperand(Inst.getOperand(6));
5556    Inst = TmpInst;
5557    return true;
5558  }
5559
5560  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
5561  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
5562  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
5563  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5564  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5565  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5566  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5567  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5568  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5569  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5570  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
5571  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
5572  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
5573  case ARM::VLD2LNqWB_register_Asm_U32: {
5574    MCInst TmpInst;
5575    // Shuffle the operands around so the lane index operand is in the
5576    // right place.
5577    unsigned Spacing;
5578    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5579    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5580    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5581                                            Spacing));
5582    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5583    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5584    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5585    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5586    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5587    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5588                                            Spacing));
5589    TmpInst.addOperand(Inst.getOperand(1)); // lane
5590    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5591    TmpInst.addOperand(Inst.getOperand(6));
5592    Inst = TmpInst;
5593    return true;
5594  }
5595
5596  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5597  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5598  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5599  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5600  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5601  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5602  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5603  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5604    MCInst TmpInst;
5605    // Shuffle the operands around so the lane index operand is in the
5606    // right place.
5607    unsigned Spacing;
5608    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5609    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5610    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5611    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5612    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5613    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5614    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5615    TmpInst.addOperand(Inst.getOperand(1)); // lane
5616    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5617    TmpInst.addOperand(Inst.getOperand(5));
5618    Inst = TmpInst;
5619    return true;
5620  }
5621
5622  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
5623  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
5624  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
5625  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5626  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5627  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5628  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5629  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5630  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5631  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5632  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
5633  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
5634  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
5635  case ARM::VLD2LNqWB_fixed_Asm_U32: {
5636    MCInst TmpInst;
5637    // Shuffle the operands around so the lane index operand is in the
5638    // right place.
5639    unsigned Spacing;
5640    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5641    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5642    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5643                                            Spacing));
5644    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5645    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5646    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5647    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5648    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5649    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5650                                            Spacing));
5651    TmpInst.addOperand(Inst.getOperand(1)); // lane
5652    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5653    TmpInst.addOperand(Inst.getOperand(5));
5654    Inst = TmpInst;
5655    return true;
5656  }
5657
5658  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
5659  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
5660  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5661  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5662  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5663  case ARM::VLD1LNdAsm_U32: {
5664    MCInst TmpInst;
5665    // Shuffle the operands around so the lane index operand is in the
5666    // right place.
5667    unsigned Spacing;
5668    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5669    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5670    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5671    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5672    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5673    TmpInst.addOperand(Inst.getOperand(1)); // lane
5674    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5675    TmpInst.addOperand(Inst.getOperand(5));
5676    Inst = TmpInst;
5677    return true;
5678  }
5679
5680  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
5681  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
5682  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5683  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5684  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5685  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5686  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
5687  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
5688  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
5689  case ARM::VLD2LNqAsm_U32: {
5690    MCInst TmpInst;
5691    // Shuffle the operands around so the lane index operand is in the
5692    // right place.
5693    unsigned Spacing;
5694    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5695    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5696    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5697                                            Spacing));
5698    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5699    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5700    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5701    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5702                                            Spacing));
5703    TmpInst.addOperand(Inst.getOperand(1)); // lane
5704    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5705    TmpInst.addOperand(Inst.getOperand(5));
5706    Inst = TmpInst;
5707    return true;
5708  }
5709  // Handle the Thumb2 mode MOV complex aliases.
5710  case ARM::t2MOVsr:
5711  case ARM::t2MOVSsr: {
5712    // Which instruction to expand to depends on the CCOut operand and
5713    // whether we're in an IT block if the register operands are low
5714    // registers.
5715    bool isNarrow = false;
5716    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5717        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5718        isARMLowRegister(Inst.getOperand(2).getReg()) &&
5719        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
5720        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
5721      isNarrow = true;
5722    MCInst TmpInst;
5723    unsigned newOpc;
5724    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
5725    default: llvm_unreachable("unexpected opcode!");
5726    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
5727    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
5728    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
5729    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
5730    }
5731    TmpInst.setOpcode(newOpc);
5732    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5733    if (isNarrow)
5734      TmpInst.addOperand(MCOperand::CreateReg(
5735          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5736    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5737    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5738    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5739    TmpInst.addOperand(Inst.getOperand(5));
5740    if (!isNarrow)
5741      TmpInst.addOperand(MCOperand::CreateReg(
5742          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5743    Inst = TmpInst;
5744    return true;
5745  }
5746  case ARM::t2MOVsi:
5747  case ARM::t2MOVSsi: {
5748    // Which instruction to expand to depends on the CCOut operand and,
5749    // if the register operands are low registers, whether we're in an
5750    // IT block.
5751    bool isNarrow = false;
5752    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5753        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5754        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5755      isNarrow = true;
5756    MCInst TmpInst;
5757    unsigned newOpc;
5758    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5759    default: llvm_unreachable("unexpected opcode!");
5760    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5761    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5762    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5763    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5764    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
5765    }
5766    unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5767    if (Ammount == 32) Ammount = 0;
5768    TmpInst.setOpcode(newOpc);
5769    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5770    if (isNarrow)
5771      TmpInst.addOperand(MCOperand::CreateReg(
5772          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5773    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5774    if (newOpc != ARM::t2RRX)
5775      TmpInst.addOperand(MCOperand::CreateImm(Ammount));
5776    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5777    TmpInst.addOperand(Inst.getOperand(4));
5778    if (!isNarrow)
5779      TmpInst.addOperand(MCOperand::CreateReg(
5780          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5781    Inst = TmpInst;
5782    return true;
5783  }
5784  // Handle the ARM mode MOV complex aliases.
5785  case ARM::ASRr:
5786  case ARM::LSRr:
5787  case ARM::LSLr:
5788  case ARM::RORr: {
5789    ARM_AM::ShiftOpc ShiftTy;
5790    switch(Inst.getOpcode()) {
5791    default: llvm_unreachable("unexpected opcode!");
5792    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5793    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5794    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5795    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5796    }
5797    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5798    MCInst TmpInst;
5799    TmpInst.setOpcode(ARM::MOVsr);
5800    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5801    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5802    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5803    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5804    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5805    TmpInst.addOperand(Inst.getOperand(4));
5806    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5807    Inst = TmpInst;
5808    return true;
5809  }
5810  case ARM::ASRi:
5811  case ARM::LSRi:
5812  case ARM::LSLi:
5813  case ARM::RORi: {
5814    ARM_AM::ShiftOpc ShiftTy;
5815    switch(Inst.getOpcode()) {
5816    default: llvm_unreachable("unexpected opcode!");
5817    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5818    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5819    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5820    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5821    }
5822    // A shift by zero is a plain MOVr, not a MOVsi.
5823    unsigned Amt = Inst.getOperand(2).getImm();
5824    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5825    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5826    MCInst TmpInst;
5827    TmpInst.setOpcode(Opc);
5828    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5829    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5830    if (Opc == ARM::MOVsi)
5831      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5832    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5833    TmpInst.addOperand(Inst.getOperand(4));
5834    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5835    Inst = TmpInst;
5836    return true;
5837  }
5838  case ARM::RRXi: {
5839    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5840    MCInst TmpInst;
5841    TmpInst.setOpcode(ARM::MOVsi);
5842    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5843    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5844    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5845    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5846    TmpInst.addOperand(Inst.getOperand(3));
5847    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5848    Inst = TmpInst;
5849    return true;
5850  }
5851  case ARM::t2LDMIA_UPD: {
5852    // If this is a load of a single register, then we should use
5853    // a post-indexed LDR instruction instead, per the ARM ARM.
5854    if (Inst.getNumOperands() != 5)
5855      return false;
5856    MCInst TmpInst;
5857    TmpInst.setOpcode(ARM::t2LDR_POST);
5858    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5859    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5860    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5861    TmpInst.addOperand(MCOperand::CreateImm(4));
5862    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5863    TmpInst.addOperand(Inst.getOperand(3));
5864    Inst = TmpInst;
5865    return true;
5866  }
5867  case ARM::t2STMDB_UPD: {
5868    // If this is a store of a single register, then we should use
5869    // a pre-indexed STR instruction instead, per the ARM ARM.
5870    if (Inst.getNumOperands() != 5)
5871      return false;
5872    MCInst TmpInst;
5873    TmpInst.setOpcode(ARM::t2STR_PRE);
5874    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5875    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5876    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5877    TmpInst.addOperand(MCOperand::CreateImm(-4));
5878    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5879    TmpInst.addOperand(Inst.getOperand(3));
5880    Inst = TmpInst;
5881    return true;
5882  }
5883  case ARM::LDMIA_UPD:
5884    // If this is a load of a single register via a 'pop', then we should use
5885    // a post-indexed LDR instruction instead, per the ARM ARM.
5886    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5887        Inst.getNumOperands() == 5) {
5888      MCInst TmpInst;
5889      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5890      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5891      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5892      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5893      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5894      TmpInst.addOperand(MCOperand::CreateImm(4));
5895      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5896      TmpInst.addOperand(Inst.getOperand(3));
5897      Inst = TmpInst;
5898      return true;
5899    }
5900    break;
5901  case ARM::STMDB_UPD:
5902    // If this is a store of a single register via a 'push', then we should use
5903    // a pre-indexed STR instruction instead, per the ARM ARM.
5904    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5905        Inst.getNumOperands() == 5) {
5906      MCInst TmpInst;
5907      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5908      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5909      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5910      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5911      TmpInst.addOperand(MCOperand::CreateImm(-4));
5912      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5913      TmpInst.addOperand(Inst.getOperand(3));
5914      Inst = TmpInst;
5915    }
5916    break;
5917  case ARM::t2ADDri12:
5918    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5919    // mnemonic was used (not "addw"), encoding T3 is preferred.
5920    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5921        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5922      break;
5923    Inst.setOpcode(ARM::t2ADDri);
5924    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5925    break;
5926  case ARM::t2SUBri12:
5927    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5928    // mnemonic was used (not "subw"), encoding T3 is preferred.
5929    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5930        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5931      break;
5932    Inst.setOpcode(ARM::t2SUBri);
5933    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5934    break;
5935  case ARM::tADDi8:
5936    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5937    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5938    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5939    // to encoding T1 if <Rd> is omitted."
5940    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5941      Inst.setOpcode(ARM::tADDi3);
5942      return true;
5943    }
5944    break;
5945  case ARM::tSUBi8:
5946    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5947    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5948    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5949    // to encoding T1 if <Rd> is omitted."
5950    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5951      Inst.setOpcode(ARM::tSUBi3);
5952      return true;
5953    }
5954    break;
5955  case ARM::t2ADDrr: {
5956    // If the destination and first source operand are the same, and
5957    // there's no setting of the flags, use encoding T2 instead of T3.
5958    // Note that this is only for ADD, not SUB. This mirrors the system
5959    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
5960    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5961        Inst.getOperand(5).getReg() != 0 ||
5962        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5963         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5964      break;
5965    MCInst TmpInst;
5966    TmpInst.setOpcode(ARM::tADDhirr);
5967    TmpInst.addOperand(Inst.getOperand(0));
5968    TmpInst.addOperand(Inst.getOperand(0));
5969    TmpInst.addOperand(Inst.getOperand(2));
5970    TmpInst.addOperand(Inst.getOperand(3));
5971    TmpInst.addOperand(Inst.getOperand(4));
5972    Inst = TmpInst;
5973    return true;
5974  }
5975  case ARM::tB:
5976    // A Thumb conditional branch outside of an IT block is a tBcc.
5977    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5978      Inst.setOpcode(ARM::tBcc);
5979      return true;
5980    }
5981    break;
5982  case ARM::t2B:
5983    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5984    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
5985      Inst.setOpcode(ARM::t2Bcc);
5986      return true;
5987    }
5988    break;
5989  case ARM::t2Bcc:
5990    // If the conditional is AL or we're in an IT block, we really want t2B.
5991    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5992      Inst.setOpcode(ARM::t2B);
5993      return true;
5994    }
5995    break;
5996  case ARM::tBcc:
5997    // If the conditional is AL, we really want tB.
5998    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5999      Inst.setOpcode(ARM::tB);
6000      return true;
6001    }
6002    break;
6003  case ARM::tLDMIA: {
6004    // If the register list contains any high registers, or if the writeback
6005    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6006    // instead if we're in Thumb2. Otherwise, this should have generated
6007    // an error in validateInstruction().
6008    unsigned Rn = Inst.getOperand(0).getReg();
6009    bool hasWritebackToken =
6010      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6011       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6012    bool listContainsBase;
6013    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6014        (!listContainsBase && !hasWritebackToken) ||
6015        (listContainsBase && hasWritebackToken)) {
6016      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6017      assert (isThumbTwo());
6018      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6019      // If we're switching to the updating version, we need to insert
6020      // the writeback tied operand.
6021      if (hasWritebackToken)
6022        Inst.insert(Inst.begin(),
6023                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6024      return true;
6025    }
6026    break;
6027  }
6028  case ARM::tSTMIA_UPD: {
6029    // If the register list contains any high registers, we need to use
6030    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6031    // should have generated an error in validateInstruction().
6032    unsigned Rn = Inst.getOperand(0).getReg();
6033    bool listContainsBase;
6034    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6035      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6036      assert (isThumbTwo());
6037      Inst.setOpcode(ARM::t2STMIA_UPD);
6038      return true;
6039    }
6040    break;
6041  }
6042  case ARM::tPOP: {
6043    bool listContainsBase;
6044    // If the register list contains any high registers, we need to use
6045    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6046    // should have generated an error in validateInstruction().
6047    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6048      return false;
6049    assert (isThumbTwo());
6050    Inst.setOpcode(ARM::t2LDMIA_UPD);
6051    // Add the base register and writeback operands.
6052    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6053    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6054    return true;
6055  }
6056  case ARM::tPUSH: {
6057    bool listContainsBase;
6058    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6059      return false;
6060    assert (isThumbTwo());
6061    Inst.setOpcode(ARM::t2STMDB_UPD);
6062    // Add the base register and writeback operands.
6063    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6064    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6065    return true;
6066  }
6067  case ARM::t2MOVi: {
6068    // If we can use the 16-bit encoding and the user didn't explicitly
6069    // request the 32-bit variant, transform it here.
6070    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6071        Inst.getOperand(1).getImm() <= 255 &&
6072        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6073         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6074        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6075        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6076         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6077      // The operands aren't in the same order for tMOVi8...
6078      MCInst TmpInst;
6079      TmpInst.setOpcode(ARM::tMOVi8);
6080      TmpInst.addOperand(Inst.getOperand(0));
6081      TmpInst.addOperand(Inst.getOperand(4));
6082      TmpInst.addOperand(Inst.getOperand(1));
6083      TmpInst.addOperand(Inst.getOperand(2));
6084      TmpInst.addOperand(Inst.getOperand(3));
6085      Inst = TmpInst;
6086      return true;
6087    }
6088    break;
6089  }
6090  case ARM::t2MOVr: {
6091    // If we can use the 16-bit encoding and the user didn't explicitly
6092    // request the 32-bit variant, transform it here.
6093    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6094        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6095        Inst.getOperand(2).getImm() == ARMCC::AL &&
6096        Inst.getOperand(4).getReg() == ARM::CPSR &&
6097        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6098         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6099      // The operands aren't the same for tMOV[S]r... (no cc_out)
6100      MCInst TmpInst;
6101      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6102      TmpInst.addOperand(Inst.getOperand(0));
6103      TmpInst.addOperand(Inst.getOperand(1));
6104      TmpInst.addOperand(Inst.getOperand(2));
6105      TmpInst.addOperand(Inst.getOperand(3));
6106      Inst = TmpInst;
6107      return true;
6108    }
6109    break;
6110  }
6111  case ARM::t2SXTH:
6112  case ARM::t2SXTB:
6113  case ARM::t2UXTH:
6114  case ARM::t2UXTB: {
6115    // If we can use the 16-bit encoding and the user didn't explicitly
6116    // request the 32-bit variant, transform it here.
6117    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6118        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6119        Inst.getOperand(2).getImm() == 0 &&
6120        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6121         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6122      unsigned NewOpc;
6123      switch (Inst.getOpcode()) {
6124      default: llvm_unreachable("Illegal opcode!");
6125      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6126      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6127      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6128      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6129      }
6130      // The operands aren't the same for thumb1 (no rotate operand).
6131      MCInst TmpInst;
6132      TmpInst.setOpcode(NewOpc);
6133      TmpInst.addOperand(Inst.getOperand(0));
6134      TmpInst.addOperand(Inst.getOperand(1));
6135      TmpInst.addOperand(Inst.getOperand(3));
6136      TmpInst.addOperand(Inst.getOperand(4));
6137      Inst = TmpInst;
6138      return true;
6139    }
6140    break;
6141  }
6142  case ARM::MOVsi: {
6143    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6144    if (SOpc == ARM_AM::rrx) return false;
6145    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6146      // Shifting by zero is accepted as a vanilla 'MOVr'
6147      MCInst TmpInst;
6148      TmpInst.setOpcode(ARM::MOVr);
6149      TmpInst.addOperand(Inst.getOperand(0));
6150      TmpInst.addOperand(Inst.getOperand(1));
6151      TmpInst.addOperand(Inst.getOperand(3));
6152      TmpInst.addOperand(Inst.getOperand(4));
6153      TmpInst.addOperand(Inst.getOperand(5));
6154      Inst = TmpInst;
6155      return true;
6156    }
6157    return false;
6158  }
6159  case ARM::ANDrsi:
6160  case ARM::ORRrsi:
6161  case ARM::EORrsi:
6162  case ARM::BICrsi:
6163  case ARM::SUBrsi:
6164  case ARM::ADDrsi: {
6165    unsigned newOpc;
6166    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
6167    if (SOpc == ARM_AM::rrx) return false;
6168    switch (Inst.getOpcode()) {
6169    default: assert("unexpected opcode!");
6170    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
6171    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
6172    case ARM::EORrsi: newOpc = ARM::EORrr; break;
6173    case ARM::BICrsi: newOpc = ARM::BICrr; break;
6174    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
6175    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
6176    }
6177    // If the shift is by zero, use the non-shifted instruction definition.
6178    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
6179      MCInst TmpInst;
6180      TmpInst.setOpcode(newOpc);
6181      TmpInst.addOperand(Inst.getOperand(0));
6182      TmpInst.addOperand(Inst.getOperand(1));
6183      TmpInst.addOperand(Inst.getOperand(2));
6184      TmpInst.addOperand(Inst.getOperand(4));
6185      TmpInst.addOperand(Inst.getOperand(5));
6186      TmpInst.addOperand(Inst.getOperand(6));
6187      Inst = TmpInst;
6188      return true;
6189    }
6190    return false;
6191  }
6192  case ARM::t2IT: {
6193    // The mask bits for all but the first condition are represented as
6194    // the low bit of the condition code value implies 't'. We currently
6195    // always have 1 implies 't', so XOR toggle the bits if the low bit
6196    // of the condition code is zero. The encoding also expects the low
6197    // bit of the condition to be encoded as bit 4 of the mask operand,
6198    // so mask that in if needed
6199    MCOperand &MO = Inst.getOperand(1);
6200    unsigned Mask = MO.getImm();
6201    unsigned OrigMask = Mask;
6202    unsigned TZ = CountTrailingZeros_32(Mask);
6203    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6204      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6205      for (unsigned i = 3; i != TZ; --i)
6206        Mask ^= 1 << i;
6207    } else
6208      Mask |= 0x10;
6209    MO.setImm(Mask);
6210
6211    // Set up the IT block state according to the IT instruction we just
6212    // matched.
6213    assert(!inITBlock() && "nested IT blocks?!");
6214    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6215    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6216    ITState.CurPosition = 0;
6217    ITState.FirstCond = true;
6218    break;
6219  }
6220  }
6221  return false;
6222}
6223
6224unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6225  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6226  // suffix depending on whether they're in an IT block or not.
6227  unsigned Opc = Inst.getOpcode();
6228  const MCInstrDesc &MCID = getInstDesc(Opc);
6229  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6230    assert(MCID.hasOptionalDef() &&
6231           "optionally flag setting instruction missing optional def operand");
6232    assert(MCID.NumOperands == Inst.getNumOperands() &&
6233           "operand count mismatch!");
6234    // Find the optional-def operand (cc_out).
6235    unsigned OpNo;
6236    for (OpNo = 0;
6237         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
6238         ++OpNo)
6239      ;
6240    // If we're parsing Thumb1, reject it completely.
6241    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6242      return Match_MnemonicFail;
6243    // If we're parsing Thumb2, which form is legal depends on whether we're
6244    // in an IT block.
6245    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6246        !inITBlock())
6247      return Match_RequiresITBlock;
6248    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6249        inITBlock())
6250      return Match_RequiresNotITBlock;
6251  }
6252  // Some high-register supporting Thumb1 encodings only allow both registers
6253  // to be from r0-r7 when in Thumb2.
6254  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6255           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6256           isARMLowRegister(Inst.getOperand(2).getReg()))
6257    return Match_RequiresThumb2;
6258  // Others only require ARMv6 or later.
6259  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6260           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6261           isARMLowRegister(Inst.getOperand(1).getReg()))
6262    return Match_RequiresV6;
6263  return Match_Success;
6264}
6265
/// MatchAndEmitInstruction - Run the generated matcher over the parsed
/// operands, apply target-specific validation and post-processing, and emit
/// the resulting MCInst to the streamer. Returns true on error (a diagnostic
/// has been emitted), false on success.
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                        MCStreamer &Out) {
  MCInst Inst;
  unsigned ErrorInfo;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
  switch (MatchResult) {
  default: break;
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other. E.g.,
    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
    while (processInstruction(Inst, Operands))
      ;

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // ErrorInfo, when valid, is the index of the offending operand; point
    // the diagnostic at that operand's source location if we have one.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  // Not reached; kept to quiet compilers that don't model llvm_unreachable.
  return true;
}
6333
6334/// parseDirective parses the arm specific directives
6335bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6336  StringRef IDVal = DirectiveID.getIdentifier();
6337  if (IDVal == ".word")
6338    return parseDirectiveWord(4, DirectiveID.getLoc());
6339  else if (IDVal == ".thumb")
6340    return parseDirectiveThumb(DirectiveID.getLoc());
6341  else if (IDVal == ".arm")
6342    return parseDirectiveARM(DirectiveID.getLoc());
6343  else if (IDVal == ".thumb_func")
6344    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6345  else if (IDVal == ".code")
6346    return parseDirectiveCode(DirectiveID.getLoc());
6347  else if (IDVal == ".syntax")
6348    return parseDirectiveSyntax(DirectiveID.getLoc());
6349  else if (IDVal == ".unreq")
6350    return parseDirectiveUnreq(DirectiveID.getLoc());
6351  else if (IDVal == ".arch")
6352    return parseDirectiveArch(DirectiveID.getLoc());
6353  else if (IDVal == ".eabi_attribute")
6354    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6355  return true;
6356}
6357
6358/// parseDirectiveWord
6359///  ::= .word [ expression (, expression)* ]
6360bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6361  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6362    for (;;) {
6363      const MCExpr *Value;
6364      if (getParser().ParseExpression(Value))
6365        return true;
6366
6367      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6368
6369      if (getLexer().is(AsmToken::EndOfStatement))
6370        break;
6371
6372      // FIXME: Improve diagnostic.
6373      if (getLexer().isNot(AsmToken::Comma))
6374        return Error(L, "unexpected token in directive");
6375      Parser.Lex();
6376    }
6377  }
6378
6379  Parser.Lex();
6380  return false;
6381}
6382
6383/// parseDirectiveThumb
6384///  ::= .thumb
6385bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6386  if (getLexer().isNot(AsmToken::EndOfStatement))
6387    return Error(L, "unexpected token in directive");
6388  Parser.Lex();
6389
6390  if (!isThumb())
6391    SwitchMode();
6392  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6393  return false;
6394}
6395
6396/// parseDirectiveARM
6397///  ::= .arm
6398bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6399  if (getLexer().isNot(AsmToken::EndOfStatement))
6400    return Error(L, "unexpected token in directive");
6401  Parser.Lex();
6402
6403  if (isThumb())
6404    SwitchMode();
6405  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6406  return false;
6407}
6408
6409/// parseDirectiveThumbFunc
6410///  ::= .thumbfunc symbol_name
6411bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6412  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6413  bool isMachO = MAI.hasSubsectionsViaSymbols();
6414  StringRef Name;
6415  bool needFuncName = true;
6416
6417  // Darwin asm has (optionally) function name after .thumb_func direction
6418  // ELF doesn't
6419  if (isMachO) {
6420    const AsmToken &Tok = Parser.getTok();
6421    if (Tok.isNot(AsmToken::EndOfStatement)) {
6422      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6423        return Error(L, "unexpected token in .thumb_func directive");
6424      Name = Tok.getIdentifier();
6425      Parser.Lex(); // Consume the identifier token.
6426      needFuncName = false;
6427    }
6428  }
6429
6430  if (getLexer().isNot(AsmToken::EndOfStatement))
6431    return Error(L, "unexpected token in directive");
6432
6433  // Eat the end of statement and any blank lines that follow.
6434  while (getLexer().is(AsmToken::EndOfStatement))
6435    Parser.Lex();
6436
6437  // FIXME: assuming function name will be the line following .thumb_func
6438  // We really should be checking the next symbol definition even if there's
6439  // stuff in between.
6440  if (needFuncName) {
6441    Name = Parser.getTok().getIdentifier();
6442  }
6443
6444  // Mark symbol as a thumb symbol.
6445  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6446  getParser().getStreamer().EmitThumbFunc(Func);
6447  return false;
6448}
6449
6450/// parseDirectiveSyntax
6451///  ::= .syntax unified | divided
6452bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6453  const AsmToken &Tok = Parser.getTok();
6454  if (Tok.isNot(AsmToken::Identifier))
6455    return Error(L, "unexpected token in .syntax directive");
6456  StringRef Mode = Tok.getString();
6457  if (Mode == "unified" || Mode == "UNIFIED")
6458    Parser.Lex();
6459  else if (Mode == "divided" || Mode == "DIVIDED")
6460    return Error(L, "'.syntax divided' arm asssembly not supported");
6461  else
6462    return Error(L, "unrecognized syntax mode in .syntax directive");
6463
6464  if (getLexer().isNot(AsmToken::EndOfStatement))
6465    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6466  Parser.Lex();
6467
6468  // TODO tell the MC streamer the mode
6469  // getParser().getStreamer().Emit???();
6470  return false;
6471}
6472
6473/// parseDirectiveCode
6474///  ::= .code 16 | 32
6475bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6476  const AsmToken &Tok = Parser.getTok();
6477  if (Tok.isNot(AsmToken::Integer))
6478    return Error(L, "unexpected token in .code directive");
6479  int64_t Val = Parser.getTok().getIntVal();
6480  if (Val == 16)
6481    Parser.Lex();
6482  else if (Val == 32)
6483    Parser.Lex();
6484  else
6485    return Error(L, "invalid operand to .code directive");
6486
6487  if (getLexer().isNot(AsmToken::EndOfStatement))
6488    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6489  Parser.Lex();
6490
6491  if (Val == 16) {
6492    if (!isThumb())
6493      SwitchMode();
6494    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6495  } else {
6496    if (isThumb())
6497      SwitchMode();
6498    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6499  }
6500
6501  return false;
6502}
6503
6504/// parseDirectiveReq
6505///  ::= name .req registername
6506bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6507  Parser.Lex(); // Eat the '.req' token.
6508  unsigned Reg;
6509  SMLoc SRegLoc, ERegLoc;
6510  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6511    Parser.EatToEndOfStatement();
6512    return Error(SRegLoc, "register name expected");
6513  }
6514
6515  // Shouldn't be anything else.
6516  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6517    Parser.EatToEndOfStatement();
6518    return Error(Parser.getTok().getLoc(),
6519                 "unexpected input in .req directive.");
6520  }
6521
6522  Parser.Lex(); // Consume the EndOfStatement
6523
6524  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6525    return Error(SRegLoc, "redefinition of '" + Name +
6526                          "' does not match original.");
6527
6528  return false;
6529}
6530
6531/// parseDirectiveUneq
6532///  ::= .unreq registername
6533bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6534  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6535    Parser.EatToEndOfStatement();
6536    return Error(L, "unexpected input in .unreq directive.");
6537  }
6538  RegisterReqs.erase(Parser.getTok().getIdentifier());
6539  Parser.Lex(); // Eat the identifier.
6540  return false;
6541}
6542
/// parseDirectiveArch
///  ::= .arch token
/// TODO: Not implemented yet; returning true rejects the directive, so the
/// caller reports it as unhandled.
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
  return true;
}
6548
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int
/// TODO: Not implemented yet; returning true rejects the directive, so the
/// caller reports it as unhandled.
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  return true;
}
6554
6555extern "C" void LLVMInitializeARMAsmLexer();
6556
/// Force static initialization.
/// Registers this asm parser with the target registry for both the ARM and
/// Thumb targets, then initializes the companion asm lexer.
extern "C" void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}
6563
6564#define GET_REGISTER_MATCHER
6565#define GET_MATCHER_IMPLEMENTATION
6566#include "ARMGenAsmMatcher.inc"
6567