ARMAsmParser.cpp revision 51222d1551383dd7b95ba356b1a5ed89df69e789
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registers via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // First instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an ITBlock to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_MemBarrierOpt,
274    k_Memory,
275    k_PostIndexRegister,
276    k_MSRMask,
277    k_ProcIFlags,
278    k_VectorIndex,
279    k_Register,
280    k_RegisterList,
281    k_DPRRegisterList,
282    k_SPRRegisterList,
283    k_VectorList,
284    k_VectorListAllLanes,
285    k_VectorListIndexed,
286    k_ShiftedRegister,
287    k_ShiftedImmediate,
288    k_ShifterImmediate,
289    k_RotateImmediate,
290    k_BitfieldDescriptor,
291    k_Token
292  } Kind;
293
294  SMLoc StartLoc, EndLoc;
295  SmallVector<unsigned, 8> Registers;
296
297  union {
298    struct {
299      ARMCC::CondCodes Val;
300    } CC;
301
302    struct {
303      unsigned Val;
304    } Cop;
305
306    struct {
307      unsigned Val;
308    } CoprocOption;
309
310    struct {
311      unsigned Mask:4;
312    } ITMask;
313
314    struct {
315      ARM_MB::MemBOpt Val;
316    } MBOpt;
317
318    struct {
319      ARM_PROC::IFlags Val;
320    } IFlags;
321
322    struct {
323      unsigned Val;
324    } MMask;
325
326    struct {
327      const char *Data;
328      unsigned Length;
329    } Tok;
330
331    struct {
332      unsigned RegNum;
333    } Reg;
334
335    // A vector register list is a sequential list of 1 to 4 registers.
336    struct {
337      unsigned RegNum;
338      unsigned Count;
339      unsigned LaneIndex;
340      bool isDoubleSpaced;
341    } VectorList;
342
343    struct {
344      unsigned Val;
345    } VectorIndex;
346
347    struct {
348      const MCExpr *Val;
349    } Imm;
350
351    /// Combined record for all forms of ARM address expressions.
352    struct {
353      unsigned BaseRegNum;
354      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
355      // was specified.
356      const MCConstantExpr *OffsetImm;  // Offset immediate value
357      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
358      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
359      unsigned ShiftImm;        // shift for OffsetReg.
360      unsigned Alignment;       // 0 = no alignment specified
361                                // n = alignment in bytes (2, 4, 8, 16, or 32)
362      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
363    } Memory;
364
365    struct {
366      unsigned RegNum;
367      bool isAdd;
368      ARM_AM::ShiftOpc ShiftTy;
369      unsigned ShiftImm;
370    } PostIdxReg;
371
372    struct {
373      bool isASR;
374      unsigned Imm;
375    } ShifterImm;
376    struct {
377      ARM_AM::ShiftOpc ShiftTy;
378      unsigned SrcReg;
379      unsigned ShiftReg;
380      unsigned ShiftImm;
381    } RegShiftedReg;
382    struct {
383      ARM_AM::ShiftOpc ShiftTy;
384      unsigned SrcReg;
385      unsigned ShiftImm;
386    } RegShiftedImm;
387    struct {
388      unsigned Imm;
389    } RotImm;
390    struct {
391      unsigned LSB;
392      unsigned Width;
393    } Bitfield;
394  };
395
396  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
397public:
398  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
399    Kind = o.Kind;
400    StartLoc = o.StartLoc;
401    EndLoc = o.EndLoc;
402    switch (Kind) {
403    case k_CondCode:
404      CC = o.CC;
405      break;
406    case k_ITCondMask:
407      ITMask = o.ITMask;
408      break;
409    case k_Token:
410      Tok = o.Tok;
411      break;
412    case k_CCOut:
413    case k_Register:
414      Reg = o.Reg;
415      break;
416    case k_RegisterList:
417    case k_DPRRegisterList:
418    case k_SPRRegisterList:
419      Registers = o.Registers;
420      break;
421    case k_VectorList:
422    case k_VectorListAllLanes:
423    case k_VectorListIndexed:
424      VectorList = o.VectorList;
425      break;
426    case k_CoprocNum:
427    case k_CoprocReg:
428      Cop = o.Cop;
429      break;
430    case k_CoprocOption:
431      CoprocOption = o.CoprocOption;
432      break;
433    case k_Immediate:
434      Imm = o.Imm;
435      break;
436    case k_MemBarrierOpt:
437      MBOpt = o.MBOpt;
438      break;
439    case k_Memory:
440      Memory = o.Memory;
441      break;
442    case k_PostIndexRegister:
443      PostIdxReg = o.PostIdxReg;
444      break;
445    case k_MSRMask:
446      MMask = o.MMask;
447      break;
448    case k_ProcIFlags:
449      IFlags = o.IFlags;
450      break;
451    case k_ShifterImmediate:
452      ShifterImm = o.ShifterImm;
453      break;
454    case k_ShiftedRegister:
455      RegShiftedReg = o.RegShiftedReg;
456      break;
457    case k_ShiftedImmediate:
458      RegShiftedImm = o.RegShiftedImm;
459      break;
460    case k_RotateImmediate:
461      RotImm = o.RotImm;
462      break;
463    case k_BitfieldDescriptor:
464      Bitfield = o.Bitfield;
465      break;
466    case k_VectorIndex:
467      VectorIndex = o.VectorIndex;
468      break;
469    }
470  }
471
472  /// getStartLoc - Get the location of the first token of this operand.
473  SMLoc getStartLoc() const { return StartLoc; }
474  /// getEndLoc - Get the location of the last token of this operand.
475  SMLoc getEndLoc() const { return EndLoc; }
476
477  ARMCC::CondCodes getCondCode() const {
478    assert(Kind == k_CondCode && "Invalid access!");
479    return CC.Val;
480  }
481
482  unsigned getCoproc() const {
483    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
484    return Cop.Val;
485  }
486
487  StringRef getToken() const {
488    assert(Kind == k_Token && "Invalid access!");
489    return StringRef(Tok.Data, Tok.Length);
490  }
491
492  unsigned getReg() const {
493    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
494    return Reg.RegNum;
495  }
496
497  const SmallVectorImpl<unsigned> &getRegList() const {
498    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
499            Kind == k_SPRRegisterList) && "Invalid access!");
500    return Registers;
501  }
502
503  const MCExpr *getImm() const {
504    assert(isImm() && "Invalid access!");
505    return Imm.Val;
506  }
507
508  unsigned getVectorIndex() const {
509    assert(Kind == k_VectorIndex && "Invalid access!");
510    return VectorIndex.Val;
511  }
512
513  ARM_MB::MemBOpt getMemBarrierOpt() const {
514    assert(Kind == k_MemBarrierOpt && "Invalid access!");
515    return MBOpt.Val;
516  }
517
518  ARM_PROC::IFlags getProcIFlags() const {
519    assert(Kind == k_ProcIFlags && "Invalid access!");
520    return IFlags.Val;
521  }
522
523  unsigned getMSRMask() const {
524    assert(Kind == k_MSRMask && "Invalid access!");
525    return MMask.Val;
526  }
527
528  bool isCoprocNum() const { return Kind == k_CoprocNum; }
529  bool isCoprocReg() const { return Kind == k_CoprocReg; }
530  bool isCoprocOption() const { return Kind == k_CoprocOption; }
531  bool isCondCode() const { return Kind == k_CondCode; }
532  bool isCCOut() const { return Kind == k_CCOut; }
533  bool isITMask() const { return Kind == k_ITCondMask; }
534  bool isITCondCode() const { return Kind == k_CondCode; }
535  bool isImm() const { return Kind == k_Immediate; }
536  bool isFPImm() const {
537    if (!isImm()) return false;
538    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
539    if (!CE) return false;
540    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
541    return Val != -1;
542  }
543  bool isFBits16() const {
544    if (!isImm()) return false;
545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
546    if (!CE) return false;
547    int64_t Value = CE->getValue();
548    return Value >= 0 && Value <= 16;
549  }
550  bool isFBits32() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 1 && Value <= 32;
556  }
557  bool isImm8s4() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
563  }
564  bool isImm0_1020s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
570  }
571  bool isImm0_508s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
577  }
578  bool isImm0_255() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return Value >= 0 && Value < 256;
584  }
585  bool isImm0_1() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 2;
591  }
592  bool isImm0_3() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 4;
598  }
599  bool isImm0_7() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 8;
605  }
606  bool isImm0_15() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 16;
612  }
613  bool isImm0_31() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 32;
619  }
620  bool isImm0_63() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 64;
626  }
627  bool isImm8() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value == 8;
633  }
634  bool isImm16() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 16;
640  }
641  bool isImm32() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 32;
647  }
648  bool isShrImm8() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value > 0 && Value <= 8;
654  }
655  bool isShrImm16() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 16;
661  }
662  bool isShrImm32() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 32;
668  }
669  bool isShrImm64() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 64;
675  }
676  bool isImm1_7() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value < 8;
682  }
683  bool isImm1_15() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 16;
689  }
690  bool isImm1_31() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 32;
696  }
697  bool isImm1_16() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 17;
703  }
704  bool isImm1_32() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 33;
710  }
711  bool isImm0_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value >= 0 && Value < 33;
717  }
718  bool isImm0_65535() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 65536;
724  }
725  bool isImm0_65535Expr() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    // If it's not a constant expression, it'll generate a fixup and be
729    // handled later.
730    if (!CE) return true;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value < 65536;
733  }
734  bool isImm24bit() const {
735    if (!isImm()) return false;
736    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
737    if (!CE) return false;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value <= 0xffffff;
740  }
741  bool isImmThumbSR() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value > 0 && Value < 33;
747  }
748  bool isPKHLSLImm() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value >= 0 && Value < 32;
754  }
755  bool isPKHASRImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value > 0 && Value <= 32;
761  }
762  bool isARMSOImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return ARM_AM::getSOImmVal(Value) != -1;
768  }
769  bool isARMSOImmNot() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(~Value) != -1;
775  }
776  bool isARMSOImmNeg() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(-Value) != -1;
782  }
783  bool isT2SOImm() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getT2SOImmVal(Value) != -1;
789  }
790  bool isT2SOImmNot() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(~Value) != -1;
796  }
797  bool isT2SOImmNeg() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(-Value) != -1;
803  }
804  bool isSetEndImm() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return Value == 1 || Value == 0;
810  }
811  bool isReg() const { return Kind == k_Register; }
812  bool isRegList() const { return Kind == k_RegisterList; }
813  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
814  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
815  bool isToken() const { return Kind == k_Token; }
816  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
817  bool isMemory() const { return Kind == k_Memory; }
818  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
819  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
820  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
821  bool isRotImm() const { return Kind == k_RotateImmediate; }
822  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
823  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
824  bool isPostIdxReg() const {
825    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
826  }
827  bool isMemNoOffset(bool alignOK = false) const {
828    if (!isMemory())
829      return false;
830    // No offset of any kind.
831    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
832     (alignOK || Memory.Alignment == 0);
833  }
834  bool isMemPCRelImm12() const {
835    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
836      return false;
837    // Base register must be PC.
838    if (Memory.BaseRegNum != ARM::PC)
839      return false;
840    // Immediate offset in range [-4095, 4095].
841    if (!Memory.OffsetImm) return true;
842    int64_t Val = Memory.OffsetImm->getValue();
843    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
844  }
845  bool isAlignedMemory() const {
846    return isMemNoOffset(true);
847  }
848  bool isAddrMode2() const {
849    if (!isMemory() || Memory.Alignment != 0) return false;
850    // Check for register offset.
851    if (Memory.OffsetRegNum) return true;
852    // Immediate offset in range [-4095, 4095].
853    if (!Memory.OffsetImm) return true;
854    int64_t Val = Memory.OffsetImm->getValue();
855    return Val > -4096 && Val < 4096;
856  }
857  bool isAM2OffsetImm() const {
858    if (!isImm()) return false;
859    // Immediate offset in range [-4095, 4095].
860    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
861    if (!CE) return false;
862    int64_t Val = CE->getValue();
863    return Val > -4096 && Val < 4096;
864  }
  // ARM addressing mode 3: register offset (no shift) or immediate offset
  // in [-255, 255]. Also accepts a non-constant expression (label fixup).
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  // Stand-alone addrmode3 offset: a post-index register (no shift) or a
  // constant in [-255, 255].
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  // ARM addressing mode 5 (VFP load/store): immediate offset only, a
  // multiple of 4 in [-1020, 1020]. Register offsets are rejected.
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the "#-0" sentinel (see isAM3Offset).
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  // TBB operand: [Rn, Rm] — plain register offset, no shift, no negation,
  // no alignment.
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  // TBH operand: [Rn, Rm, lsl #1] — the halfword-table form only.
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  // Generic register-offset memory operand; any shift is acceptable here.
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
926  bool isT2MemRegOffset() const {
927    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
928        Memory.Alignment != 0)
929      return false;
930    // Only lsl #{0, 1, 2, 3} allowed.
931    if (Memory.ShiftType == ARM_AM::no_shift)
932      return true;
933    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
934      return false;
935    return true;
936  }
937  bool isMemThumbRR() const {
938    // Thumb reg+reg addressing is simple. Just two registers, a base and
939    // an offset. No shifts, negations or any other complicating factors.
940    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
941        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
942      return false;
943    return isARMLowRegister(Memory.BaseRegNum) &&
944      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
945  }
  // Thumb [Rn, #imm] with a low base register and a word-scaled offset:
  // a multiple of 4 in [0, 124].
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  // Thumb [Rn, #imm] with a low base register and a halfword-scaled
  // offset.
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62]. (The previous
    // comment said "multiple of 4", but the code checks Val % 2.)
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  // Thumb [Rn, #imm] with a low base register and a byte offset in
  // [0, 31].
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  // Thumb [sp, #imm]: SP-relative, a multiple of 4 in [0, 1020].
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  // [Rn, #imm] with imm a multiple of 4 in [-1020, 1020]; also accepts a
  // non-constant expression as a label reference.
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  // [Rn, #imm] with imm a multiple of 4 in [0, 1020] (unsigned variant).
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  // [Rn, #imm] with imm in [-255, 255]; PC base disallowed.
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the "#-0" sentinel (see isAM3Offset).
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  // [Rn, #imm] with a non-negative imm in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  // [Rn, #-imm] with a strictly negative imm in [-255, -1]; note that a
  // missing immediate is rejected here (zero is not negative).
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Base reg of PC isn't allowed for these encodings.
    if (Memory.BaseRegNum == ARM::PC) return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN ("#-0") counts as negative here.
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
1031  bool isMemUImm12Offset() const {
1032    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1033      return false;
1034    // Immediate offset in range [0, 4095].
1035    if (!Memory.OffsetImm) return true;
1036    int64_t Val = Memory.OffsetImm->getValue();
1037    return (Val >= 0 && Val < 4096);
1038  }
  // [Rn, #imm] with imm in [-4095, 4095]; non-constant expressions are
  // accepted as label references.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm() && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the "#-0" sentinel (see isAM3Offset).
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  // Post-indexed immediate in [-255, 255] (INT32_MIN encodes "#-0").
  bool isPostIdxImm8() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  // Post-indexed immediate, a multiple of 4 in [-1020, 1020].
  bool isPostIdxImm8s4() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }
1068
  // MSR mask and CPS interrupt-flag operands are pure kind checks.
  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1071
  // NEON operands.
  // Vector-list predicates: the list spacing (consecutive D registers vs
  // every-other-D "double spaced") plus the register count.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // Two double-spaced D registers, i.e. a Q-register pair view.
  bool isVecListTwoQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }
1103
  // "All lanes" vector-list predicates (the Dd[] syntax for VLD*DUP).
  bool isSingleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorAllLanes() const {
    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoDAllLanes() const {
    if (!isSingleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListTwoQAllLanes() const {
    if (!isDoubleSpacedVectorAllLanes()) return false;
    return VectorList.Count == 2;
  }
1124
  // Lane-indexed vector-list predicates (the Dd[n] syntax). The maximum
  // lane index depends on the element size: 7 for bytes, 3 for halfwords,
  // 1 for words.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }
1170
  // Scalar lane-index operands; the valid range narrows as the element
  // width grows (8 lanes of bytes, 4 halfwords, 2 words per D register).
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }
1183
1184  bool isNEONi8splat() const {
1185    if (!isImm()) return false;
1186    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1187    // Must be a constant.
1188    if (!CE) return false;
1189    int64_t Value = CE->getValue();
1190    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1191    // value.
1192    return Value >= 0 && Value < 256;
1193  }
1194
  // VMOV.i16 splat immediate.
  bool isNEONi16splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }
1204
  // VMOV.i32 splat immediate: the value occupies exactly one byte lane.
  bool isNEONi32splat() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }
1217
  // VMOV.i32 immediate: the one-byte forms plus the 00XX'ff / 0X'ffff
  // "ones-extended" forms that VMOV/VMVN additionally accept.
  bool isNEONi32vmov() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
  // Same check applied to the bitwise complement of the immediate, for
  // the VMVN-encodable negated forms.
  bool isNEONi32vmovNeg() const {
    if (!isImm()) return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
1248
1249  bool isNEONi64splat() const {
1250    if (!isImm()) return false;
1251    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1252    // Must be a constant.
1253    if (!CE) return false;
1254    uint64_t Value = CE->getValue();
1255    // i64 value with each byte being either 0 or 0xff.
1256    for (unsigned i = 0; i < 8; ++i)
1257      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1258    return true;
1259  }
1260
1261  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1262    // Add as immediates when possible.  Null MCExpr = 0.
1263    if (Expr == 0)
1264      Inst.addOperand(MCOperand::CreateImm(0));
1265    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1266      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1267    else
1268      Inst.addOperand(MCOperand::CreateExpr(Expr));
1269  }
1270
  // Condition code is two MCOperands: the code itself plus CPSR (or reg 0
  // when the condition is AL, i.e. the instruction is unpredicated).
  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }
1312
  // Register shifted by register: source reg, shift reg, and the packed
  // shift opcode/amount immediate.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by immediate: source reg plus the packed shift
  // opcode/amount immediate.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }
1331
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Shift amount in the low bits, with bit 5 set when the shift is ASR
    // (clear for LSL).
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
1337
  // Emit one register MCOperand per entry of the register list.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  // D- and S-register lists are emitted identically to core-reg lists.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
1353
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }
1359
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    // The double shift first isolates `width` bits starting at `lsb`, then
    // the complement clears exactly those bits in the mask.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }
1370
  // Generic immediate: delegate to addExpr, which folds constants.
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }
1375
  // Fixed-point fraction bits encode as (width - value).
  void addFBits16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
  }

  void addFBits32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
  }

  // VFP immediate: re-encode the raw 32-bit float bits via getFP32Imm.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1394
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }
1418
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }
1434
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }
1452
  // The *Not/*Neg variants below exist for the alias forms where the
  // assembly carries the complement/negation of the encodable immediate.
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }
1484
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  // Base-register-only memory operand: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }
1494
  // PC-relative imm12: emit only the immediate (the PC base is implicit).
  // NOTE(review): this dereferences Memory.OffsetImm unconditionally,
  // while isMemPCRelImm12() accepts a null OffsetImm — presumably the
  // parser always materializes a zero immediate for this form; confirm.
  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    int32_t Imm = Memory.OffsetImm->getValue();
    // FIXME: Handle #-0
    if (Imm == INT32_MIN) Imm = 0;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  // Aligned memory: base register plus the alignment (in bytes; 0 = none).
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }
1508
  // Addrmode2: base reg, offset reg (0 when immediate form), and the
  // packed AM2 opcode word (add/sub flag, amount, shift).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      // Immediate-offset form: pack the magnitude with an add/sub flag.
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1528
  // Stand-alone AM2 offset immediate: a zero register slot plus the
  // packed AM2 opcode word.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1542
  // Addrmode3: base reg, offset reg (0 for immediate form), packed AM3
  // opcode word. A bare expression becomes a label-fixup triple.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateReg(0));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1571
  // Stand-alone AM3 offset: either a post-index register (with add/sub
  // direction) or a constant immediate, as reg + packed AM3 word.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1593
  // Addrmode5 (VFP): base reg plus packed AM5 word with the word-scaled
  // offset. A bare expression becomes a label-fixup pair.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1615
1616  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1617    assert(N == 2 && "Invalid number of operands!");
1618    // If we have an immediate that's not a constant, treat it as a label
1619    // reference needing a fixup. If it is a constant, it's something else
1620    // and we reject it.
1621    if (isImm()) {
1622      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1623      Inst.addOperand(MCOperand::CreateImm(0));
1624      return;
1625    }
1626
1627    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1628    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1629    Inst.addOperand(MCOperand::CreateImm(Val));
1630  }
1631
1632  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1633    assert(N == 2 && "Invalid number of operands!");
1634    // The lower two bits are always zero and as such are not encoded.
1635    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1636    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1637    Inst.addOperand(MCOperand::CreateImm(Val));
1638  }
1639
1640  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1641    assert(N == 2 && "Invalid number of operands!");
1642    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1643    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1644    Inst.addOperand(MCOperand::CreateImm(Val));
1645  }
1646
  // Positive-imm8 variant: the encoding is identical to the generic imm8
  // form; the sign restriction is enforced by the matcher predicate.
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
1650
  // Negative-imm8 variant: the encoding is identical to the generic imm8
  // form; the sign restriction is enforced by the matcher predicate.
  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
1654
1655  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1656    assert(N == 2 && "Invalid number of operands!");
1657    // If this is an immediate, it's a label reference.
1658    if (isImm()) {
1659      addExpr(Inst, getImm());
1660      Inst.addOperand(MCOperand::CreateImm(0));
1661      return;
1662    }
1663
1664    // Otherwise, it's a normal memory reg+offset.
1665    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1666    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1667    Inst.addOperand(MCOperand::CreateImm(Val));
1668  }
1669
1670  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1671    assert(N == 2 && "Invalid number of operands!");
1672    // If this is an immediate, it's a label reference.
1673    if (isImm()) {
1674      addExpr(Inst, getImm());
1675      Inst.addOperand(MCOperand::CreateImm(0));
1676      return;
1677    }
1678
1679    // Otherwise, it's a normal memory reg+offset.
1680    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1681    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1682    Inst.addOperand(MCOperand::CreateImm(Val));
1683  }
1684
  // TBB (table branch byte): emit the base register and index register.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1690
  // TBH (table branch halfword): emit the base register and index register.
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1696
1697  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1698    assert(N == 3 && "Invalid number of operands!");
1699    unsigned Val =
1700      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1701                        Memory.ShiftImm, Memory.ShiftType);
1702    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1703    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1704    Inst.addOperand(MCOperand::CreateImm(Val));
1705  }
1706
1707  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1708    assert(N == 3 && "Invalid number of operands!");
1709    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1710    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1711    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1712  }
1713
  // Thumb reg+reg memory reference: emit base and offset registers.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1719
1720  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1721    assert(N == 2 && "Invalid number of operands!");
1722    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1723    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1724    Inst.addOperand(MCOperand::CreateImm(Val));
1725  }
1726
1727  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1728    assert(N == 2 && "Invalid number of operands!");
1729    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1730    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1731    Inst.addOperand(MCOperand::CreateImm(Val));
1732  }
1733
1734  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1735    assert(N == 2 && "Invalid number of operands!");
1736    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1737    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1738    Inst.addOperand(MCOperand::CreateImm(Val));
1739  }
1740
1741  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1742    assert(N == 2 && "Invalid number of operands!");
1743    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1744    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1745    Inst.addOperand(MCOperand::CreateImm(Val));
1746  }
1747
1748  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1749    assert(N == 1 && "Invalid number of operands!");
1750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1751    assert(CE && "non-constant post-idx-imm8 operand!");
1752    int Imm = CE->getValue();
1753    bool isAdd = Imm >= 0;
1754    if (Imm == INT32_MIN) Imm = 0;
1755    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1756    Inst.addOperand(MCOperand::CreateImm(Imm));
1757  }
1758
1759  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1760    assert(N == 1 && "Invalid number of operands!");
1761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1762    assert(CE && "non-constant post-idx-imm8s4 operand!");
1763    int Imm = CE->getValue();
1764    bool isAdd = Imm >= 0;
1765    if (Imm == INT32_MIN) Imm = 0;
1766    // Immediate is scaled by 4.
1767    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1768    Inst.addOperand(MCOperand::CreateImm(Imm));
1769  }
1770
1771  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1772    assert(N == 2 && "Invalid number of operands!");
1773    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1774    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1775  }
1776
1777  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1778    assert(N == 2 && "Invalid number of operands!");
1779    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1780    // The sign, shift type, and shift amount are encoded in a single operand
1781    // using the AM2 encoding helpers.
1782    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1783    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1784                                     PostIdxReg.ShiftTy);
1785    Inst.addOperand(MCOperand::CreateImm(Imm));
1786  }
1787
1788  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1789    assert(N == 1 && "Invalid number of operands!");
1790    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1791  }
1792
1793  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1794    assert(N == 1 && "Invalid number of operands!");
1795    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1796  }
1797
1798  void addVecListOperands(MCInst &Inst, unsigned N) const {
1799    assert(N == 1 && "Invalid number of operands!");
1800    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1801  }
1802
1803  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1804    assert(N == 2 && "Invalid number of operands!");
1805    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1806    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1807  }
1808
1809  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1810    assert(N == 1 && "Invalid number of operands!");
1811    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1812  }
1813
1814  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1815    assert(N == 1 && "Invalid number of operands!");
1816    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1817  }
1818
1819  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1820    assert(N == 1 && "Invalid number of operands!");
1821    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1822  }
1823
1824  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1825    assert(N == 1 && "Invalid number of operands!");
1826    // The immediate encodes the type of constant as well as the value.
1827    // Mask in that this is an i8 splat.
1828    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1829    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1830  }
1831
1832  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1833    assert(N == 1 && "Invalid number of operands!");
1834    // The immediate encodes the type of constant as well as the value.
1835    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1836    unsigned Value = CE->getValue();
1837    if (Value >= 256)
1838      Value = (Value >> 8) | 0xa00;
1839    else
1840      Value |= 0x800;
1841    Inst.addOperand(MCOperand::CreateImm(Value));
1842  }
1843
1844  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1845    assert(N == 1 && "Invalid number of operands!");
1846    // The immediate encodes the type of constant as well as the value.
1847    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1848    unsigned Value = CE->getValue();
1849    if (Value >= 256 && Value <= 0xff00)
1850      Value = (Value >> 8) | 0x200;
1851    else if (Value > 0xffff && Value <= 0xff0000)
1852      Value = (Value >> 16) | 0x400;
1853    else if (Value > 0xffffff)
1854      Value = (Value >> 24) | 0x600;
1855    Inst.addOperand(MCOperand::CreateImm(Value));
1856  }
1857
1858  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1859    assert(N == 1 && "Invalid number of operands!");
1860    // The immediate encodes the type of constant as well as the value.
1861    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1862    unsigned Value = CE->getValue();
1863    if (Value >= 256 && Value <= 0xffff)
1864      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1865    else if (Value > 0xffff && Value <= 0xffffff)
1866      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1867    else if (Value > 0xffffff)
1868      Value = (Value >> 24) | 0x600;
1869    Inst.addOperand(MCOperand::CreateImm(Value));
1870  }
1871
1872  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1873    assert(N == 1 && "Invalid number of operands!");
1874    // The immediate encodes the type of constant as well as the value.
1875    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1876    unsigned Value = ~CE->getValue();
1877    if (Value >= 256 && Value <= 0xffff)
1878      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1879    else if (Value > 0xffff && Value <= 0xffffff)
1880      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1881    else if (Value > 0xffffff)
1882      Value = (Value >> 24) | 0x600;
1883    Inst.addOperand(MCOperand::CreateImm(Value));
1884  }
1885
1886  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1887    assert(N == 1 && "Invalid number of operands!");
1888    // The immediate encodes the type of constant as well as the value.
1889    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1890    uint64_t Value = CE->getValue();
1891    unsigned Imm = 0;
1892    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1893      Imm |= (Value & 1) << i;
1894    }
1895    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1896  }
1897
  // Debug dump of this operand; implemented out-of-line later in this file.
  virtual void print(raw_ostream &OS) const;
1899
1900  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1901    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1902    Op->ITMask.Mask = Mask;
1903    Op->StartLoc = S;
1904    Op->EndLoc = S;
1905    return Op;
1906  }
1907
1908  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1909    ARMOperand *Op = new ARMOperand(k_CondCode);
1910    Op->CC.Val = CC;
1911    Op->StartLoc = S;
1912    Op->EndLoc = S;
1913    return Op;
1914  }
1915
1916  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1917    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1918    Op->Cop.Val = CopVal;
1919    Op->StartLoc = S;
1920    Op->EndLoc = S;
1921    return Op;
1922  }
1923
1924  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1925    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1926    Op->Cop.Val = CopVal;
1927    Op->StartLoc = S;
1928    Op->EndLoc = S;
1929    return Op;
1930  }
1931
1932  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1933    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1934    Op->Cop.Val = Val;
1935    Op->StartLoc = S;
1936    Op->EndLoc = E;
1937    return Op;
1938  }
1939
1940  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1941    ARMOperand *Op = new ARMOperand(k_CCOut);
1942    Op->Reg.RegNum = RegNum;
1943    Op->StartLoc = S;
1944    Op->EndLoc = S;
1945    return Op;
1946  }
1947
1948  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1949    ARMOperand *Op = new ARMOperand(k_Token);
1950    Op->Tok.Data = Str.data();
1951    Op->Tok.Length = Str.size();
1952    Op->StartLoc = S;
1953    Op->EndLoc = S;
1954    return Op;
1955  }
1956
1957  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1958    ARMOperand *Op = new ARMOperand(k_Register);
1959    Op->Reg.RegNum = RegNum;
1960    Op->StartLoc = S;
1961    Op->EndLoc = E;
1962    return Op;
1963  }
1964
1965  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1966                                           unsigned SrcReg,
1967                                           unsigned ShiftReg,
1968                                           unsigned ShiftImm,
1969                                           SMLoc S, SMLoc E) {
1970    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1971    Op->RegShiftedReg.ShiftTy = ShTy;
1972    Op->RegShiftedReg.SrcReg = SrcReg;
1973    Op->RegShiftedReg.ShiftReg = ShiftReg;
1974    Op->RegShiftedReg.ShiftImm = ShiftImm;
1975    Op->StartLoc = S;
1976    Op->EndLoc = E;
1977    return Op;
1978  }
1979
1980  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1981                                            unsigned SrcReg,
1982                                            unsigned ShiftImm,
1983                                            SMLoc S, SMLoc E) {
1984    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1985    Op->RegShiftedImm.ShiftTy = ShTy;
1986    Op->RegShiftedImm.SrcReg = SrcReg;
1987    Op->RegShiftedImm.ShiftImm = ShiftImm;
1988    Op->StartLoc = S;
1989    Op->EndLoc = E;
1990    return Op;
1991  }
1992
1993  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1994                                   SMLoc S, SMLoc E) {
1995    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1996    Op->ShifterImm.isASR = isASR;
1997    Op->ShifterImm.Imm = Imm;
1998    Op->StartLoc = S;
1999    Op->EndLoc = E;
2000    return Op;
2001  }
2002
2003  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2004    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2005    Op->RotImm.Imm = Imm;
2006    Op->StartLoc = S;
2007    Op->EndLoc = E;
2008    return Op;
2009  }
2010
2011  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2012                                    SMLoc S, SMLoc E) {
2013    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2014    Op->Bitfield.LSB = LSB;
2015    Op->Bitfield.Width = Width;
2016    Op->StartLoc = S;
2017    Op->EndLoc = E;
2018    return Op;
2019  }
2020
2021  static ARMOperand *
2022  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2023                SMLoc StartLoc, SMLoc EndLoc) {
2024    KindTy Kind = k_RegisterList;
2025
2026    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2027      Kind = k_DPRRegisterList;
2028    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2029             contains(Regs.front().first))
2030      Kind = k_SPRRegisterList;
2031
2032    ARMOperand *Op = new ARMOperand(Kind);
2033    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2034           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2035      Op->Registers.push_back(I->first);
2036    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2037    Op->StartLoc = StartLoc;
2038    Op->EndLoc = EndLoc;
2039    return Op;
2040  }
2041
2042  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2043                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2044    ARMOperand *Op = new ARMOperand(k_VectorList);
2045    Op->VectorList.RegNum = RegNum;
2046    Op->VectorList.Count = Count;
2047    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2048    Op->StartLoc = S;
2049    Op->EndLoc = E;
2050    return Op;
2051  }
2052
2053  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2054                                              bool isDoubleSpaced,
2055                                              SMLoc S, SMLoc E) {
2056    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2057    Op->VectorList.RegNum = RegNum;
2058    Op->VectorList.Count = Count;
2059    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2060    Op->StartLoc = S;
2061    Op->EndLoc = E;
2062    return Op;
2063  }
2064
2065  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2066                                             unsigned Index,
2067                                             bool isDoubleSpaced,
2068                                             SMLoc S, SMLoc E) {
2069    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2070    Op->VectorList.RegNum = RegNum;
2071    Op->VectorList.Count = Count;
2072    Op->VectorList.LaneIndex = Index;
2073    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2074    Op->StartLoc = S;
2075    Op->EndLoc = E;
2076    return Op;
2077  }
2078
2079  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2080                                       MCContext &Ctx) {
2081    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2082    Op->VectorIndex.Val = Idx;
2083    Op->StartLoc = S;
2084    Op->EndLoc = E;
2085    return Op;
2086  }
2087
2088  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2089    ARMOperand *Op = new ARMOperand(k_Immediate);
2090    Op->Imm.Val = Val;
2091    Op->StartLoc = S;
2092    Op->EndLoc = E;
2093    return Op;
2094  }
2095
2096  static ARMOperand *CreateMem(unsigned BaseRegNum,
2097                               const MCConstantExpr *OffsetImm,
2098                               unsigned OffsetRegNum,
2099                               ARM_AM::ShiftOpc ShiftType,
2100                               unsigned ShiftImm,
2101                               unsigned Alignment,
2102                               bool isNegative,
2103                               SMLoc S, SMLoc E) {
2104    ARMOperand *Op = new ARMOperand(k_Memory);
2105    Op->Memory.BaseRegNum = BaseRegNum;
2106    Op->Memory.OffsetImm = OffsetImm;
2107    Op->Memory.OffsetRegNum = OffsetRegNum;
2108    Op->Memory.ShiftType = ShiftType;
2109    Op->Memory.ShiftImm = ShiftImm;
2110    Op->Memory.Alignment = Alignment;
2111    Op->Memory.isNegative = isNegative;
2112    Op->StartLoc = S;
2113    Op->EndLoc = E;
2114    return Op;
2115  }
2116
2117  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2118                                      ARM_AM::ShiftOpc ShiftTy,
2119                                      unsigned ShiftImm,
2120                                      SMLoc S, SMLoc E) {
2121    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2122    Op->PostIdxReg.RegNum = RegNum;
2123    Op->PostIdxReg.isAdd = isAdd;
2124    Op->PostIdxReg.ShiftTy = ShiftTy;
2125    Op->PostIdxReg.ShiftImm = ShiftImm;
2126    Op->StartLoc = S;
2127    Op->EndLoc = E;
2128    return Op;
2129  }
2130
2131  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2132    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2133    Op->MBOpt.Val = Opt;
2134    Op->StartLoc = S;
2135    Op->EndLoc = S;
2136    return Op;
2137  }
2138
2139  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2140    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2141    Op->IFlags.Val = IFlags;
2142    Op->StartLoc = S;
2143    Op->EndLoc = S;
2144    return Op;
2145  }
2146
2147  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2148    ARMOperand *Op = new ARMOperand(k_MSRMask);
2149    Op->MMask.Val = MMask;
2150    Op->StartLoc = S;
2151    Op->EndLoc = S;
2152    return Op;
2153  }
2154};
2155
2156} // end anonymous namespace.
2157
// Dump a human-readable description of this operand to OS (debugging aid).
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    // NOTE(review): MaskStr has 15 entries (indices 0-14) but the assert
    // below admits Mask == 15, which would index out of bounds — confirm
    // a mask of 0xf is unreachable here.
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offset/shift/alignment are omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    // Print each set flag (bits 0-2), highest bit first.
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    // Comma-separated join of the register numbers.
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListAllLanes:
    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_VectorListIndexed:
    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
       << VectorList.Count << " * " << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}
2276
2277/// @name Auto-generated Match Functions
2278/// {
2279
2280static unsigned MatchRegisterName(StringRef Name);
2281
2282/// }
2283
2284bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2285                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2286  StartLoc = Parser.getTok().getLoc();
2287  RegNo = tryParseRegister();
2288  EndLoc = Parser.getTok().getLoc();
2289
2290  return (RegNo == (unsigned)-1);
2291}
2292
2293/// Try to parse a register name.  The token must be an Identifier when called,
2294/// and if it is a register name the token is eaten and the register number is
2295/// returned.  Otherwise return -1.
2296///
2297int ARMAsmParser::tryParseRegister() {
2298  const AsmToken &Tok = Parser.getTok();
2299  if (Tok.isNot(AsmToken::Identifier)) return -1;
2300
2301  std::string lowerCase = Tok.getString().lower();
2302  unsigned RegNum = MatchRegisterName(lowerCase);
2303  if (!RegNum) {
2304    RegNum = StringSwitch<unsigned>(lowerCase)
2305      .Case("r13", ARM::SP)
2306      .Case("r14", ARM::LR)
2307      .Case("r15", ARM::PC)
2308      .Case("ip", ARM::R12)
2309      // Additional register name aliases for 'gas' compatibility.
2310      .Case("a1", ARM::R0)
2311      .Case("a2", ARM::R1)
2312      .Case("a3", ARM::R2)
2313      .Case("a4", ARM::R3)
2314      .Case("v1", ARM::R4)
2315      .Case("v2", ARM::R5)
2316      .Case("v3", ARM::R6)
2317      .Case("v4", ARM::R7)
2318      .Case("v5", ARM::R8)
2319      .Case("v6", ARM::R9)
2320      .Case("v7", ARM::R10)
2321      .Case("v8", ARM::R11)
2322      .Case("sb", ARM::R9)
2323      .Case("sl", ARM::R10)
2324      .Case("fp", ARM::R11)
2325      .Default(0);
2326  }
2327  if (!RegNum) {
2328    // Check for aliases registered via .req. Canonicalize to lower case.
2329    // That's more consistent since register names are case insensitive, and
2330    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2331    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2332    // If no match, return failure.
2333    if (Entry == RegisterReqs.end())
2334      return -1;
2335    Parser.Lex(); // Eat identifier token.
2336    return Entry->getValue();
2337  }
2338
2339  Parser.Lex(); // Eat identifier token.
2340
2341  return RegNum;
2342}
2343
2344// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
2345// If a recoverable error occurs, return 1. If an irrecoverable error
2346// occurs, return -1. An irrecoverable error is one where tokens have been
2347// consumed in the process of trying to parse the shifter (i.e., when it is
2348// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Recognize the shift mnemonic; "asl" is accepted as an alias for "lsl".
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic at all: recoverable, nothing has been consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      // From here on tokens have been consumed, so failures are
      // irrecoverable (-1) rather than recoverable (1).
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
      // shift by zero is a nop. Always send it through as lsl.
      // ('as' compatibility)
      if (Imm == 0)
        ShiftTy = ARM_AM::lsl;
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
                    "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Emit either a register-shifted-register or register-shifted-immediate
  // operand (RRX always takes the immediate form with Imm == 0).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}
2439
2440
2441/// Try to parse a register name.  The token must be an Identifier when called.
2442/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2443/// if there is a "writeback". 'true' if it's not a register.
2444///
2445/// TODO this is likely to change to allow different register types and or to
2446/// parse for a specific register type.
2447bool ARMAsmParser::
2448tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2449  SMLoc S = Parser.getTok().getLoc();
2450  int RegNo = tryParseRegister();
2451  if (RegNo == -1)
2452    return true;
2453
2454  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2455
2456  const AsmToken &ExclaimTok = Parser.getTok();
2457  if (ExclaimTok.is(AsmToken::Exclaim)) {
2458    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2459                                               ExclaimTok.getLoc()));
2460    Parser.Lex(); // Eat exclaim token
2461    return false;
2462  }
2463
2464  // Also check for an index operand. This is only legal for vector registers,
2465  // but that'll get caught OK in operand matching, so we don't need to
2466  // explicitly filter everything else out here.
2467  if (Parser.getTok().is(AsmToken::LBrac)) {
2468    SMLoc SIdx = Parser.getTok().getLoc();
2469    Parser.Lex(); // Eat left bracket token.
2470
2471    const MCExpr *ImmVal;
2472    if (getParser().ParseExpression(ImmVal))
2473      return MatchOperand_ParseFail;
2474    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2475    if (!MCE) {
2476      TokError("immediate value expected for vector index");
2477      return MatchOperand_ParseFail;
2478    }
2479
2480    SMLoc E = Parser.getTok().getLoc();
2481    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2482      Error(E, "']' expected");
2483      return MatchOperand_ParseFail;
2484    }
2485
2486    Parser.Lex(); // Eat right bracket token.
2487
2488    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2489                                                     SIdx, E,
2490                                                     getContext()));
2491  }
2492
2493  return false;
2494}
2495
2496/// MatchCoprocessorOperandName - Try to parse an coprocessor related
2497/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2498/// "c5", ...
2499static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2500  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2501  // but efficient.
2502  switch (Name.size()) {
2503  default: break;
2504  case 2:
2505    if (Name[0] != CoprocOp)
2506      return -1;
2507    switch (Name[1]) {
2508    default:  return -1;
2509    case '0': return 0;
2510    case '1': return 1;
2511    case '2': return 2;
2512    case '3': return 3;
2513    case '4': return 4;
2514    case '5': return 5;
2515    case '6': return 6;
2516    case '7': return 7;
2517    case '8': return 8;
2518    case '9': return 9;
2519    }
2520    break;
2521  case 3:
2522    if (Name[0] != CoprocOp || Name[1] != '1')
2523      return -1;
2524    switch (Name[2]) {
2525    default:  return -1;
2526    case '0': return 10;
2527    case '1': return 11;
2528    case '2': return 12;
2529    case '3': return 13;
2530    case '4': return 14;
2531    case '5': return 15;
2532    }
2533    break;
2534  }
2535
2536  return -1;
2537}
2538
2539/// parseITCondCode - Try to parse a condition code for an IT instruction.
2540ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2541parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2542  SMLoc S = Parser.getTok().getLoc();
2543  const AsmToken &Tok = Parser.getTok();
2544  if (!Tok.is(AsmToken::Identifier))
2545    return MatchOperand_NoMatch;
2546  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2547    .Case("eq", ARMCC::EQ)
2548    .Case("ne", ARMCC::NE)
2549    .Case("hs", ARMCC::HS)
2550    .Case("cs", ARMCC::HS)
2551    .Case("lo", ARMCC::LO)
2552    .Case("cc", ARMCC::LO)
2553    .Case("mi", ARMCC::MI)
2554    .Case("pl", ARMCC::PL)
2555    .Case("vs", ARMCC::VS)
2556    .Case("vc", ARMCC::VC)
2557    .Case("hi", ARMCC::HI)
2558    .Case("ls", ARMCC::LS)
2559    .Case("ge", ARMCC::GE)
2560    .Case("lt", ARMCC::LT)
2561    .Case("gt", ARMCC::GT)
2562    .Case("le", ARMCC::LE)
2563    .Case("al", ARMCC::AL)
2564    .Default(~0U);
2565  if (CC == ~0U)
2566    return MatchOperand_NoMatch;
2567  Parser.Lex(); // Eat the token.
2568
2569  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2570
2571  return MatchOperand_Success;
2572}
2573
2574/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
2575/// token must be an Identifier when called, and if it is a coprocessor
2576/// number, the token is eaten and the operand is added to the operand list.
2577ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2578parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2579  SMLoc S = Parser.getTok().getLoc();
2580  const AsmToken &Tok = Parser.getTok();
2581  if (Tok.isNot(AsmToken::Identifier))
2582    return MatchOperand_NoMatch;
2583
2584  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2585  if (Num == -1)
2586    return MatchOperand_NoMatch;
2587
2588  Parser.Lex(); // Eat identifier token.
2589  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2590  return MatchOperand_Success;
2591}
2592
2593/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
2594/// token must be an Identifier when called, and if it is a coprocessor
2595/// number, the token is eaten and the operand is added to the operand list.
2596ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2597parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2598  SMLoc S = Parser.getTok().getLoc();
2599  const AsmToken &Tok = Parser.getTok();
2600  if (Tok.isNot(AsmToken::Identifier))
2601    return MatchOperand_NoMatch;
2602
2603  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2604  if (Reg == -1)
2605    return MatchOperand_NoMatch;
2606
2607  Parser.Lex(); // Eat identifier token.
2608  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2609  return MatchOperand_Success;
2610}
2611
2612/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2613/// coproc_option : '{' imm0_255 '}'
2614ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2615parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2616  SMLoc S = Parser.getTok().getLoc();
2617
2618  // If this isn't a '{', this isn't a coprocessor immediate operand.
2619  if (Parser.getTok().isNot(AsmToken::LCurly))
2620    return MatchOperand_NoMatch;
2621  Parser.Lex(); // Eat the '{'
2622
2623  const MCExpr *Expr;
2624  SMLoc Loc = Parser.getTok().getLoc();
2625  if (getParser().ParseExpression(Expr)) {
2626    Error(Loc, "illegal expression");
2627    return MatchOperand_ParseFail;
2628  }
2629  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2630  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2631    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2632    return MatchOperand_ParseFail;
2633  }
2634  int Val = CE->getValue();
2635
2636  // Check for and consume the closing '}'
2637  if (Parser.getTok().isNot(AsmToken::RCurly))
2638    return MatchOperand_ParseFail;
2639  SMLoc E = Parser.getTok().getLoc();
2640  Parser.Lex(); // Eat the '}'
2641
2642  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2643  return MatchOperand_Success;
2644}
2645
2646// For register list parsing, we need to map from raw GPR register numbering
2647// to the enumeration values. The enumeration values aren't sorted by
2648// register number due to our using "sp", "lr" and "pc" as canonical names.
2649static unsigned getNextRegister(unsigned Reg) {
2650  // If this is a GPR, we need to do it manually, otherwise we can rely
2651  // on the sort ordering of the enumeration since the other reg-classes
2652  // are sane.
2653  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2654    return Reg + 1;
2655  switch(Reg) {
2656  default: assert(0 && "Invalid GPR number!");
2657  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2658  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2659  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2660  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2661  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2662  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2663  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2664  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2665  }
2666}
2667
2668// Return the low-subreg of a given Q register.
2669static unsigned getDRegFromQReg(unsigned QReg) {
2670  switch (QReg) {
2671  default: llvm_unreachable("expected a Q register!");
2672  case ARM::Q0:  return ARM::D0;
2673  case ARM::Q1:  return ARM::D2;
2674  case ARM::Q2:  return ARM::D4;
2675  case ARM::Q3:  return ARM::D6;
2676  case ARM::Q4:  return ARM::D8;
2677  case ARM::Q5:  return ARM::D10;
2678  case ARM::Q6:  return ARM::D12;
2679  case ARM::Q7:  return ARM::D14;
2680  case ARM::Q8:  return ARM::D16;
2681  case ARM::Q9:  return ARM::D18;
2682  case ARM::Q10: return ARM::D20;
2683  case ARM::Q11: return ARM::D22;
2684  case ARM::Q12: return ARM::D24;
2685  case ARM::Q13: return ARM::D26;
2686  case ARM::Q14: return ARM::D28;
2687  case ARM::Q15: return ARM::D30;
2688  }
2689}
2690
/// Parse a register list.
/// Syntax: '{' reg (',' reg | '-' reg)* '}' with an optional trailing '^'.
/// The register class of the first register (GPR, DPR, or SPR) determines
/// which class the remaining entries must belong to. Q registers are
/// accepted and expanded into their two D sub-registers. Returns true on
/// error; the diagnostic has already been emitted via Error().
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    // Advance to the second D sub-register; it's stored below like any
    // other first register.
    ++Reg;
  }
  // Determine the register class of the list from the (possibly expanded)
  // first register.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax, e.g. "r0-r7": parse the end register and expand.
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      // getNextRegister handles the non-monotonic GPR enum ordering
      // (sp/lr/pc).
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    // Comma-separated entry: parse the next register.
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // Duplicates are only a warning (gas compatibility); the entry is
    // simply dropped.
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    // A Q register contributes its second D sub-register as well.
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
2812
// Helper function to parse the lane index for vector lists.
// On success, LaneKind is set to NoLanes (no '[' present), AllLanes
// ("[]" syntax), or IndexedLane ("[n]" syntax); Index holds the lane
// number in the IndexedLane case and 0 otherwise.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
  Index = 0; // Always return a defined index value.
  if (Parser.getTok().is(AsmToken::LBrac)) {
    Parser.Lex(); // Eat the '['.
    if (Parser.getTok().is(AsmToken::RBrac)) {
      // "Dn[]" is the 'all lanes' syntax.
      LaneKind = AllLanes;
      Parser.Lex(); // Eat the ']'.
      return MatchOperand_Success;
    }
    // Otherwise there must be a constant lane-index expression inside
    // the brackets.
    const MCExpr *LaneIndex;
    SMLoc Loc = Parser.getTok().getLoc();
    if (getParser().ParseExpression(LaneIndex)) {
      Error(Loc, "illegal expression");
      return MatchOperand_ParseFail;
    }
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
    if (!CE) {
      Error(Loc, "lane index must be empty or an integer");
      return MatchOperand_ParseFail;
    }
    if (Parser.getTok().isNot(AsmToken::RBrac)) {
      Error(Parser.getTok().getLoc(), "']' expected");
      return MatchOperand_ParseFail;
    }
    Parser.Lex(); // Eat the ']'.
    int64_t Val = CE->getValue();

    // FIXME: Make this range check context sensitive for .8, .16, .32.
    if (Val < 0 || Val > 7) {
      Error(Parser.getTok().getLoc(), "lane index out of range");
      return MatchOperand_ParseFail;
    }
    Index = Val;
    LaneKind = IndexedLane;
    return MatchOperand_Success;
  }
  // No '[' at all: this operand has no lane specifier.
  LaneKind = NoLanes;
  return MatchOperand_Success;
}
2855
// parse a vector register list
// Accepts either a bare D or Q register (gas extension, treated as a one-
// or two-entry list), or a curly-brace list of D registers where Q regs
// expand to their two D sub-registers. An optional lane specifier must be
// identical on every element of the list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without enclosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    SMLoc E = Parser.getTok().getLoc();
    // Bare D register: a one-register list.
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    // Bare Q register: a two-register list of its D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
                                                                S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  // Spacing: 0 = not yet determined, 1 = single-spaced, 2 = double-spaced.
  int Spacing = 0;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }
  // Lane specifier on the first element sets the expectation for the rest.
  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax, e.g. "{d0-d3}".
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      EndLoc = Parser.getTok().getLoc();

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    // Comma-separated entry.
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc EndLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  // Build the operand that matches the lane style seen on the elements.
  switch (LaneKind) {
  case NoLanes:
    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            (Spacing == 2),
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
3086
3087/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3088ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3089parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3090  SMLoc S = Parser.getTok().getLoc();
3091  const AsmToken &Tok = Parser.getTok();
3092  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3093  StringRef OptStr = Tok.getString();
3094
3095  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3096    .Case("sy",    ARM_MB::SY)
3097    .Case("st",    ARM_MB::ST)
3098    .Case("sh",    ARM_MB::ISH)
3099    .Case("ish",   ARM_MB::ISH)
3100    .Case("shst",  ARM_MB::ISHST)
3101    .Case("ishst", ARM_MB::ISHST)
3102    .Case("nsh",   ARM_MB::NSH)
3103    .Case("un",    ARM_MB::NSH)
3104    .Case("nshst", ARM_MB::NSHST)
3105    .Case("unst",  ARM_MB::NSHST)
3106    .Case("osh",   ARM_MB::OSH)
3107    .Case("oshst", ARM_MB::OSHST)
3108    .Default(~0U);
3109
3110  if (Opt == ~0U)
3111    return MatchOperand_NoMatch;
3112
3113  Parser.Lex(); // Eat identifier token.
3114  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3115  return MatchOperand_Success;
3116}
3117
3118/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3119ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3120parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3121  SMLoc S = Parser.getTok().getLoc();
3122  const AsmToken &Tok = Parser.getTok();
3123  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3124  StringRef IFlagsStr = Tok.getString();
3125
3126  // An iflags string of "none" is interpreted to mean that none of the AIF
3127  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3128  unsigned IFlags = 0;
3129  if (IFlagsStr != "none") {
3130        for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3131      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3132        .Case("a", ARM_PROC::A)
3133        .Case("i", ARM_PROC::I)
3134        .Case("f", ARM_PROC::F)
3135        .Default(~0U);
3136
3137      // If some specific iflag is already set, it means that some letter is
3138      // present more than once, this is not acceptable.
3139      if (Flag == ~0U || (IFlags & Flag))
3140        return MatchOperand_NoMatch;
3141
3142      IFlags |= Flag;
3143    }
3144  }
3145
3146  Parser.Lex(); // Eat identifier token.
3147  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3148  return MatchOperand_Success;
3149}
3150
3151/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3152ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3153parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3154  SMLoc S = Parser.getTok().getLoc();
3155  const AsmToken &Tok = Parser.getTok();
3156  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3157  StringRef Mask = Tok.getString();
3158
3159  if (isMClass()) {
3160    // See ARMv6-M 10.1.1
3161    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3162      .Case("apsr", 0)
3163      .Case("iapsr", 1)
3164      .Case("eapsr", 2)
3165      .Case("xpsr", 3)
3166      .Case("ipsr", 5)
3167      .Case("epsr", 6)
3168      .Case("iepsr", 7)
3169      .Case("msp", 8)
3170      .Case("psp", 9)
3171      .Case("primask", 16)
3172      .Case("basepri", 17)
3173      .Case("basepri_max", 18)
3174      .Case("faultmask", 19)
3175      .Case("control", 20)
3176      .Default(~0U);
3177
3178    if (FlagsVal == ~0U)
3179      return MatchOperand_NoMatch;
3180
3181    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3182      // basepri, basepri_max and faultmask only valid for V7m.
3183      return MatchOperand_NoMatch;
3184
3185    Parser.Lex(); // Eat identifier token.
3186    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3187    return MatchOperand_Success;
3188  }
3189
3190  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3191  size_t Start = 0, Next = Mask.find('_');
3192  StringRef Flags = "";
3193  std::string SpecReg = Mask.slice(Start, Next).lower();
3194  if (Next != StringRef::npos)
3195    Flags = Mask.slice(Next+1, Mask.size());
3196
3197  // FlagsVal contains the complete mask:
3198  // 3-0: Mask
3199  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3200  unsigned FlagsVal = 0;
3201
3202  if (SpecReg == "apsr") {
3203    FlagsVal = StringSwitch<unsigned>(Flags)
3204    .Case("nzcvq",  0x8) // same as CPSR_f
3205    .Case("g",      0x4) // same as CPSR_s
3206    .Case("nzcvqg", 0xc) // same as CPSR_fs
3207    .Default(~0U);
3208
3209    if (FlagsVal == ~0U) {
3210      if (!Flags.empty())
3211        return MatchOperand_NoMatch;
3212      else
3213        FlagsVal = 8; // No flag
3214    }
3215  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3216    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3217      Flags = "fc";
3218    for (int i = 0, e = Flags.size(); i != e; ++i) {
3219      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3220      .Case("c", 1)
3221      .Case("x", 2)
3222      .Case("s", 4)
3223      .Case("f", 8)
3224      .Default(~0U);
3225
3226      // If some specific flag is already set, it means that some letter is
3227      // present more than once, this is not acceptable.
3228      if (FlagsVal == ~0U || (FlagsVal & Flag))
3229        return MatchOperand_NoMatch;
3230      FlagsVal |= Flag;
3231    }
3232  } else // No match for special register.
3233    return MatchOperand_NoMatch;
3234
3235  // Special register without flags is NOT equivalent to "fc" flags.
3236  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3237  // two lines would enable gas compatibility at the expense of breaking
3238  // round-tripping.
3239  //
3240  // if (!FlagsVal)
3241  //  FlagsVal = 0x9;
3242
3243  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3244  if (SpecReg == "spsr")
3245    FlagsVal |= 16;
3246
3247  Parser.Lex(); // Eat identifier token.
3248  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3249  return MatchOperand_Success;
3250}
3251
3252ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3253parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3254            int Low, int High) {
3255  const AsmToken &Tok = Parser.getTok();
3256  if (Tok.isNot(AsmToken::Identifier)) {
3257    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3258    return MatchOperand_ParseFail;
3259  }
3260  StringRef ShiftName = Tok.getString();
3261  std::string LowerOp = Op.lower();
3262  std::string UpperOp = Op.upper();
3263  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3264    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3265    return MatchOperand_ParseFail;
3266  }
3267  Parser.Lex(); // Eat shift type token.
3268
3269  // There must be a '#' and a shift amount.
3270  if (Parser.getTok().isNot(AsmToken::Hash) &&
3271      Parser.getTok().isNot(AsmToken::Dollar)) {
3272    Error(Parser.getTok().getLoc(), "'#' expected");
3273    return MatchOperand_ParseFail;
3274  }
3275  Parser.Lex(); // Eat hash token.
3276
3277  const MCExpr *ShiftAmount;
3278  SMLoc Loc = Parser.getTok().getLoc();
3279  if (getParser().ParseExpression(ShiftAmount)) {
3280    Error(Loc, "illegal expression");
3281    return MatchOperand_ParseFail;
3282  }
3283  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3284  if (!CE) {
3285    Error(Loc, "constant expression expected");
3286    return MatchOperand_ParseFail;
3287  }
3288  int Val = CE->getValue();
3289  if (Val < Low || Val > High) {
3290    Error(Loc, "immediate value out of range");
3291    return MatchOperand_ParseFail;
3292  }
3293
3294  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3295
3296  return MatchOperand_Success;
3297}
3298
3299ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3300parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3301  const AsmToken &Tok = Parser.getTok();
3302  SMLoc S = Tok.getLoc();
3303  if (Tok.isNot(AsmToken::Identifier)) {
3304    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3305    return MatchOperand_ParseFail;
3306  }
3307  int Val = StringSwitch<int>(Tok.getString())
3308    .Case("be", 1)
3309    .Case("le", 0)
3310    .Default(-1);
3311  Parser.Lex(); // Eat the token.
3312
3313  if (Val == -1) {
3314    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3315    return MatchOperand_ParseFail;
3316  }
3317  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3318                                                                  getContext()),
3319                                           S, Parser.getTok().getLoc()));
3320  return MatchOperand_Success;
3321}
3322
3323/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3324/// instructions. Legal values are:
3325///     lsl #n  'n' in [0,31]
3326///     asr #n  'n' in [1,32]
3327///             n == 32 encoded as n == 0.
3328ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3329parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3330  const AsmToken &Tok = Parser.getTok();
3331  SMLoc S = Tok.getLoc();
3332  if (Tok.isNot(AsmToken::Identifier)) {
3333    Error(S, "shift operator 'asr' or 'lsl' expected");
3334    return MatchOperand_ParseFail;
3335  }
3336  StringRef ShiftName = Tok.getString();
3337  bool isASR;
3338  if (ShiftName == "lsl" || ShiftName == "LSL")
3339    isASR = false;
3340  else if (ShiftName == "asr" || ShiftName == "ASR")
3341    isASR = true;
3342  else {
3343    Error(S, "shift operator 'asr' or 'lsl' expected");
3344    return MatchOperand_ParseFail;
3345  }
3346  Parser.Lex(); // Eat the operator.
3347
3348  // A '#' and a shift amount.
3349  if (Parser.getTok().isNot(AsmToken::Hash) &&
3350      Parser.getTok().isNot(AsmToken::Dollar)) {
3351    Error(Parser.getTok().getLoc(), "'#' expected");
3352    return MatchOperand_ParseFail;
3353  }
3354  Parser.Lex(); // Eat hash token.
3355
3356  const MCExpr *ShiftAmount;
3357  SMLoc E = Parser.getTok().getLoc();
3358  if (getParser().ParseExpression(ShiftAmount)) {
3359    Error(E, "malformed shift expression");
3360    return MatchOperand_ParseFail;
3361  }
3362  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3363  if (!CE) {
3364    Error(E, "shift amount must be an immediate");
3365    return MatchOperand_ParseFail;
3366  }
3367
3368  int64_t Val = CE->getValue();
3369  if (isASR) {
3370    // Shift amount must be in [1,32]
3371    if (Val < 1 || Val > 32) {
3372      Error(E, "'asr' shift amount must be in range [1,32]");
3373      return MatchOperand_ParseFail;
3374    }
3375    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3376    if (isThumb() && Val == 32) {
3377      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3378      return MatchOperand_ParseFail;
3379    }
3380    if (Val == 32) Val = 0;
3381  } else {
3382    // Shift amount must be in [1,32]
3383    if (Val < 0 || Val > 31) {
3384      Error(E, "'lsr' shift amount must be in range [0,31]");
3385      return MatchOperand_ParseFail;
3386    }
3387  }
3388
3389  E = Parser.getTok().getLoc();
3390  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3391
3392  return MatchOperand_Success;
3393}
3394
3395/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3396/// of instructions. Legal values are:
3397///     ror #n  'n' in {0, 8, 16, 24}
3398ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3399parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3400  const AsmToken &Tok = Parser.getTok();
3401  SMLoc S = Tok.getLoc();
3402  if (Tok.isNot(AsmToken::Identifier))
3403    return MatchOperand_NoMatch;
3404  StringRef ShiftName = Tok.getString();
3405  if (ShiftName != "ror" && ShiftName != "ROR")
3406    return MatchOperand_NoMatch;
3407  Parser.Lex(); // Eat the operator.
3408
3409  // A '#' and a rotate amount.
3410  if (Parser.getTok().isNot(AsmToken::Hash) &&
3411      Parser.getTok().isNot(AsmToken::Dollar)) {
3412    Error(Parser.getTok().getLoc(), "'#' expected");
3413    return MatchOperand_ParseFail;
3414  }
3415  Parser.Lex(); // Eat hash token.
3416
3417  const MCExpr *ShiftAmount;
3418  SMLoc E = Parser.getTok().getLoc();
3419  if (getParser().ParseExpression(ShiftAmount)) {
3420    Error(E, "malformed rotate expression");
3421    return MatchOperand_ParseFail;
3422  }
3423  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3424  if (!CE) {
3425    Error(E, "rotate amount must be an immediate");
3426    return MatchOperand_ParseFail;
3427  }
3428
3429  int64_t Val = CE->getValue();
3430  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
3431  // normally, zero is represented in asm by omitting the rotate operand
3432  // entirely.
3433  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3434    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3435    return MatchOperand_ParseFail;
3436  }
3437
3438  E = Parser.getTok().getLoc();
3439  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3440
3441  return MatchOperand_Success;
3442}
3443
/// parseBitfield - Parse the "#lsb, #width" descriptor used by the bitfield
/// instructions (BFI/BFC style). 'lsb' must be in [0,31] and 'width' in
/// [1,32-lsb]; the pair is pushed as a single combined bitfield operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // The bitfield descriptor is really two operands, the LSB and the width.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *LSBExpr;
  SMLoc E = Parser.getTok().getLoc();
  if (getParser().ParseExpression(LSBExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
  if (!CE) {
    Error(E, "'lsb' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t LSB = CE->getValue();
  // The LSB must be in the range [0,31]
  if (LSB < 0 || LSB > 31) {
    Error(E, "'lsb' operand must be in the range [0,31]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  // Expect another immediate operand.
  if (Parser.getTok().isNot(AsmToken::Comma)) {
    Error(Parser.getTok().getLoc(), "too few operands");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat comma token.
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar)) {
    Error(Parser.getTok().getLoc(), "'#' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat hash token.

  const MCExpr *WidthExpr;
  if (getParser().ParseExpression(WidthExpr)) {
    Error(E, "malformed immediate expression");
    return MatchOperand_ParseFail;
  }
  CE = dyn_cast<MCConstantExpr>(WidthExpr);
  if (!CE) {
    Error(E, "'width' operand must be an immediate");
    return MatchOperand_ParseFail;
  }

  int64_t Width = CE->getValue();
  // The width must be in the range [1,32-lsb]
  if (Width < 1 || Width > 32 - LSB) {
    Error(E, "'width' operand must be in the range [1,32-lsb]");
    return MatchOperand_ParseFail;
  }
  E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));

  return MatchOperand_Success;
}
3511
3512ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3513parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3514  // Check for a post-index addressing register operand. Specifically:
3515  // postidx_reg := '+' register {, shift}
3516  //              | '-' register {, shift}
3517  //              | register {, shift}
3518
3519  // This method must return MatchOperand_NoMatch without consuming any tokens
3520  // in the case where there is no match, as other alternatives take other
3521  // parse methods.
3522  AsmToken Tok = Parser.getTok();
3523  SMLoc S = Tok.getLoc();
3524  bool haveEaten = false;
3525  bool isAdd = true;
3526  int Reg = -1;
3527  if (Tok.is(AsmToken::Plus)) {
3528    Parser.Lex(); // Eat the '+' token.
3529    haveEaten = true;
3530  } else if (Tok.is(AsmToken::Minus)) {
3531    Parser.Lex(); // Eat the '-' token.
3532    isAdd = false;
3533    haveEaten = true;
3534  }
3535  if (Parser.getTok().is(AsmToken::Identifier))
3536    Reg = tryParseRegister();
3537  if (Reg == -1) {
3538    if (!haveEaten)
3539      return MatchOperand_NoMatch;
3540    Error(Parser.getTok().getLoc(), "register expected");
3541    return MatchOperand_ParseFail;
3542  }
3543  SMLoc E = Parser.getTok().getLoc();
3544
3545  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3546  unsigned ShiftImm = 0;
3547  if (Parser.getTok().is(AsmToken::Comma)) {
3548    Parser.Lex(); // Eat the ','.
3549    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3550      return MatchOperand_ParseFail;
3551  }
3552
3553  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3554                                                  ShiftImm, S, E));
3555
3556  return MatchOperand_Success;
3557}
3558
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Check for a post-index addressing register operand. Specifically:
  // am3offset := '+' register
  //              | '-' register
  //              | register
  //              | # imm
  //              | # + imm
  //              | # - imm

  // This method must return MatchOperand_NoMatch without consuming any tokens
  // in the case where there is no match, as other alternatives take other
  // parse methods.
  AsmToken Tok = Parser.getTok(); // By-value copy: survives Parser.Lex().
  SMLoc S = Tok.getLoc();

  // Do immediates first, as we always parse those if we have a '#'.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar)) {
    Parser.Lex(); // Eat the '#'.
    // Explicitly look for a '-', as we need to encode negative zero
    // differently.
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
      return MatchOperand_ParseFail;
    // Only constant offsets are accepted here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE) {
      Error(S, "constant expression expected");
      return MatchOperand_ParseFail;
    }
    // NOTE(review): 'Tok' was captured before any Lex(), so this end loc is
    // the same as S (the '#' position) rather than the end of the
    // expression -- confirm whether the full range was intended.
    SMLoc E = Tok.getLoc();
    // Negative zero is encoded as the flag value INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      Val = INT32_MIN;

    Operands.push_back(
      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));

    return MatchOperand_Success;
  }


  // Otherwise it is a (possibly signed) register.
  bool haveEaten = false; // True once a leading '+'/'-' has been consumed.
  bool isAdd = true;
  int Reg = -1;
  if (Tok.is(AsmToken::Plus)) {
    Parser.Lex(); // Eat the '+' token.
    haveEaten = true;
  } else if (Tok.is(AsmToken::Minus)) {
    Parser.Lex(); // Eat the '-' token.
    isAdd = false;
    haveEaten = true;
  }
  if (Parser.getTok().is(AsmToken::Identifier))
    Reg = tryParseRegister();
  if (Reg == -1) {
    // If nothing was consumed, another parse method may still match.
    if (!haveEaten)
      return MatchOperand_NoMatch;
    Error(Parser.getTok().getLoc(), "register expected");
    return MatchOperand_ParseFail;
  }
  SMLoc E = Parser.getTok().getLoc();

  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
                                                  0, S, E));

  return MatchOperand_Success;
}
3629
/// cvtT2LdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, Rt2, writeback dummy, imm8s4 addr
/// (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  // NOTE(review): the ARM-mode siblings (e.g. cvtLdrdPre) use
  // MCOperand::CreateImm(0) for this placeholder -- confirm which form the
  // matcher expects.
  Inst.addOperand(MCOperand::CreateReg(0));
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3647
/// cvtT2StrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback dummy, Rt, Rt2, imm8s4 addr
/// (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateReg(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3665
/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback dummy, imm8-offset addr
/// (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3681
/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback dummy, Rt, imm8-offset addr
/// (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3695
/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback dummy, addrmode2 addr
/// (3 operands), predicate (2 operands).
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3711
/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback dummy, imm12-offset addr
/// (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3727
3728
/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback dummy, Rt, imm12-offset addr
/// (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3742
/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback dummy, Rt, addrmode2 addr
/// (3 operands), predicate (2 operands).
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3756
/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback dummy, Rt, addrmode3 addr
/// (3 operands), predicate (2 operands).
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3770
/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback dummy, base-only addr,
/// post-index imm8 offset, predicate (2 operands).
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3789
/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback dummy, base-only addr,
/// post-index register offset (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3808
/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback dummy, Rt, base-only addr,
/// post-index imm8 offset, predicate (2 operands).
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3827
/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback dummy, Rt, base-only addr,
/// post-index register offset (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3846
/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, Rt2, writeback dummy, addrmode3 addr
/// (3 operands), predicate (2 operands).
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  // NOTE(review): the Thumb2 sibling (cvtT2LdrdPre) uses
  // MCOperand::CreateReg(0) for this placeholder -- confirm which form the
  // matcher expects.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3864
/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: writeback dummy, Rt, Rt2, addrmode3 addr
/// (3 operands), predicate (2 operands).
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3882
/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
/// MCInst operand order produced: Rt, writeback dummy, addrmode3 addr
/// (3 operands), predicate (2 operands).
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3896
/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // In the three-operand form, the destination register must match at least
  // one of the source operands (the instruction ties them).
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  // Rd, then the optional-def CC-out operand.
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // Rm is tied to Rd: re-add operand 0 (the destination) as the second source.
  Inst.addOperand(Inst.getOperand(0));
  // pred
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}
3929
/// cvtVLDwbFixed - Convert parsed operands for a VLD with fixed-increment
/// writeback to MCInst. Needed because the matcher can't handle the tied
/// writeback operand. Order produced: Vd list, writeback dummy, aligned
/// memory addr (2 operands), predicate (2 operands).
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3943
/// cvtVLDwbRegister - Convert parsed operands for a VLD with register-
/// increment writeback to MCInst. Needed because the matcher can't handle
/// the tied writeback operand. Order produced: Vd list, writeback dummy,
/// aligned memory addr (2 operands), Vm increment register, predicate
/// (2 operands).
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3959
/// cvtVSTwbFixed - Convert parsed operands for a VST with fixed-increment
/// writeback to MCInst. Needed because the matcher can't handle the tied
/// writeback operand. Order produced: writeback dummy, aligned memory addr
/// (2 operands), Vt list, predicate (2 operands).
bool ARMAsmParser::
cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3973
/// cvtVSTwbRegister - Convert parsed operands for a VST with register-
/// increment writeback to MCInst. Needed because the matcher can't handle
/// the tied writeback operand. Order produced: writeback dummy, aligned
/// memory addr (2 operands), Vm increment register, Vt list, predicate
/// (2 operands).
bool ARMAsmParser::
cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // Vt
  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}
3989
3990/// Parse an ARM memory expression, return false if successful else return true
3991/// or an error.  The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  // Every memory operand form starts with a base register.
  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Form 1: "[Rn]" -- bare base register, no offset.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // Form 2: "[Rn, :align]". If we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Convert the alignment from bits (the assembly syntax) to the byte
    // count carried on the memory operand.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E,
                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
    case 16:  Align = 2; break;
    case 32:  Align = 4; break;
    case 64:  Align = 8; break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Form 3: "[Rn, #imm]".
  // If we have a '#', it's an immediate offset, else assume it's a register
  // offset. Be friendly and also accept a plain integer (without a leading
  // hash) for gas compatibility.
  if (Parser.getTok().is(AsmToken::Hash) ||
      Parser.getTok().is(AsmToken::Dollar) ||
      Parser.getTok().is(AsmToken::Integer)) {
    if (Parser.getTok().isNot(AsmToken::Integer))
      Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    // Remember whether the literal started with '-' so "#-0" can be
    // distinguished from "#0" after constant folding below.
    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Form 4: "[Rn, +/-Rm(, shift)]".
  // The register offset is optionally preceded by a '+' or '-'
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}
4172
/// parseMemRegOffsetShift - Parse one of these two forms:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// Returns false if a shift was successfully parsed; otherwise emits a
/// diagnostic (or, if the token is not even an identifier, stays silent)
/// and returns true.
4177bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4178                                          unsigned &Amount) {
4179  SMLoc Loc = Parser.getTok().getLoc();
4180  const AsmToken &Tok = Parser.getTok();
4181  if (Tok.isNot(AsmToken::Identifier))
4182    return true;
4183  StringRef ShiftName = Tok.getString();
4184  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4185      ShiftName == "asl" || ShiftName == "ASL")
4186    St = ARM_AM::lsl;
4187  else if (ShiftName == "lsr" || ShiftName == "LSR")
4188    St = ARM_AM::lsr;
4189  else if (ShiftName == "asr" || ShiftName == "ASR")
4190    St = ARM_AM::asr;
4191  else if (ShiftName == "ror" || ShiftName == "ROR")
4192    St = ARM_AM::ror;
4193  else if (ShiftName == "rrx" || ShiftName == "RRX")
4194    St = ARM_AM::rrx;
4195  else
4196    return Error(Loc, "illegal shift operator");
4197  Parser.Lex(); // Eat shift type token.
4198
4199  // rrx stands alone.
4200  Amount = 0;
4201  if (St != ARM_AM::rrx) {
4202    Loc = Parser.getTok().getLoc();
4203    // A '#' and a shift amount.
4204    const AsmToken &HashTok = Parser.getTok();
4205    if (HashTok.isNot(AsmToken::Hash) &&
4206        HashTok.isNot(AsmToken::Dollar))
4207      return Error(HashTok.getLoc(), "'#' expected");
4208    Parser.Lex(); // Eat hash token.
4209
4210    const MCExpr *Expr;
4211    if (getParser().ParseExpression(Expr))
4212      return true;
4213    // Range check the immediate.
4214    // lsl, ror: 0 <= imm <= 31
4215    // lsr, asr: 0 <= imm <= 32
4216    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4217    if (!CE)
4218      return Error(Loc, "shift amount must be an immediate");
4219    int64_t Imm = CE->getValue();
4220    if (Imm < 0 ||
4221        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4222        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4223      return Error(Loc, "immediate shift value out of range");
4224    Amount = Imm;
4225  }
4226
4227  return false;
4228}
4229
4230/// parseFPImm - A floating point immediate expression operand.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Anything that can accept a floating point constant as an operand
  // needs to go through here, as the regular ParseExpression is
  // integer only.
  //
  // This routine still creates a generic Immediate operand, containing
  // a bitcast of the 64-bit floating point value. The various operands
  // that accept floats can check whether the value is valid for them
  // via the standard is*() predicates.

  SMLoc S = Parser.getTok().getLoc();

  // An FP immediate must be introduced by '#' (or '$' for gas compatibility).
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  SMLoc Loc = Tok.getLoc();
  if (Tok.is(AsmToken::Real)) {
    // NOTE(review): the literal is always parsed as IEEE single precision
    // and the sign is applied to bit 31, even when the type suffix was
    // ".f64" -- confirm the downstream is*() predicates expect a 32-bit
    // pattern here.
    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 31;
    Parser.Lex(); // Eat the token.
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(IntVal, getContext()),
          S, Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }
  // Also handle plain integers. Instructions which allow floating point
  // immediates also allow a raw encoded 8-bit value.
  if (Tok.is(AsmToken::Integer)) {
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      Error(Loc, "encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    // Decode the 8-bit VFP immediate encoding to the value it represents,
    // then store that value's double-precision bit pattern in the operand.
    double RealVal = ARM_AM::getFPImmFloat(Val);
    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
    Operands.push_back(ARMOperand::CreateImm(
        MCConstantExpr::Create(Val, getContext()), S,
        Parser.getTok().getLoc()));
    return MatchOperand_Success;
  }

  Error(Loc, "invalid floating point immediate");
  return MatchOperand_ParseFail;
}
4304
4305/// Parse a arm instruction operand.  For now this parses the operand regardless
4306/// of the mnemonic.
4307bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4308                                StringRef Mnemonic) {
4309  SMLoc S, E;
4310
4311  // Check if the current operand has a custom associated parser, if so, try to
4312  // custom parse the operand, or fallback to the general approach.
4313  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4314  if (ResTy == MatchOperand_Success)
4315    return false;
4316  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4317  // there was a match, but an error occurred, in which case, just return that
4318  // the operand parsing failed.
4319  if (ResTy == MatchOperand_ParseFail)
4320    return true;
4321
4322  switch (getLexer().getKind()) {
4323  default:
4324    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4325    return true;
4326  case AsmToken::Identifier: {
4327    if (!tryParseRegisterWithWriteBack(Operands))
4328      return false;
4329    int Res = tryParseShiftRegister(Operands);
4330    if (Res == 0) // success
4331      return false;
4332    else if (Res == -1) // irrecoverable error
4333      return true;
4334    // If this is VMRS, check for the apsr_nzcv operand.
4335    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4336      S = Parser.getTok().getLoc();
4337      Parser.Lex();
4338      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4339      return false;
4340    }
4341
4342    // Fall though for the Identifier case that is not a register or a
4343    // special name.
4344  }
4345  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4346  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4347  case AsmToken::String:  // quoted label names.
4348  case AsmToken::Dot: {   // . as a branch target
4349    // This was not a register so parse other operands that start with an
4350    // identifier (like labels) as expressions and create them as immediates.
4351    const MCExpr *IdVal;
4352    S = Parser.getTok().getLoc();
4353    if (getParser().ParseExpression(IdVal))
4354      return true;
4355    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4356    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4357    return false;
4358  }
4359  case AsmToken::LBrac:
4360    return parseMemory(Operands);
4361  case AsmToken::LCurly:
4362    return parseRegisterList(Operands);
4363  case AsmToken::Dollar:
4364  case AsmToken::Hash: {
4365    // #42 -> immediate.
4366    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4367    S = Parser.getTok().getLoc();
4368    Parser.Lex();
4369    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4370    const MCExpr *ImmVal;
4371    if (getParser().ParseExpression(ImmVal))
4372      return true;
4373    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4374    if (CE) {
4375      int32_t Val = CE->getValue();
4376      if (isNegative && Val == 0)
4377        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4378    }
4379    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4380    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4381    return false;
4382  }
4383  case AsmToken::Colon: {
4384    // ":lower16:" and ":upper16:" expression prefixes
4385    // FIXME: Check it's an expression prefix,
4386    // e.g. (FOO - :lower16:BAR) isn't legal.
4387    ARMMCExpr::VariantKind RefKind;
4388    if (parsePrefix(RefKind))
4389      return true;
4390
4391    const MCExpr *SubExprVal;
4392    if (getParser().ParseExpression(SubExprVal))
4393      return true;
4394
4395    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4396                                                   getContext());
4397    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4398    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4399    return false;
4400  }
4401  }
4402}
4403
4404// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4405//  :lower16: and :upper16:.
4406bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4407  RefKind = ARMMCExpr::VK_ARM_None;
4408
4409  // :lower16: and :upper16: modifiers
4410  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4411  Parser.Lex(); // Eat ':'
4412
4413  if (getLexer().isNot(AsmToken::Identifier)) {
4414    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4415    return true;
4416  }
4417
4418  StringRef IDVal = Parser.getTok().getIdentifier();
4419  if (IDVal == "lower16") {
4420    RefKind = ARMMCExpr::VK_ARM_LO16;
4421  } else if (IDVal == "upper16") {
4422    RefKind = ARMMCExpr::VK_ARM_HI16;
4423  } else {
4424    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4425    return true;
4426  }
4427  Parser.Lex();
4428
4429  if (getLexer().isNot(AsmToken::Colon)) {
4430    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4431    return true;
4432  }
4433  Parser.Lex(); // Eat the last ':'
4434  return false;
4435}
4436
4437/// \brief Given a mnemonic, split out possible predication code and carry
4438/// setting letters to form a canonical mnemonic and flags.
4439//
4440// FIXME: Would be nice to autogen this.
4441// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  // Defaults: unpredicated (AL), no 'S' suffix, no processor imod field.
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // Look at the last two characters for a condition code suffix.
    // StringRef::substr clamps its start index, so mnemonics shorter than
    // two characters yield an empty string (and no match) here.
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}
4535
4536/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4537/// inclusion of carry set or predication code operands.
4538//
4539// FIXME: It would be nice to autogen this.
4540void ARMAsmParser::
4541getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4542                      bool &CanAcceptPredicationCode) {
4543  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4544      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4545      Mnemonic == "add" || Mnemonic == "adc" ||
4546      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4547      Mnemonic == "orr" || Mnemonic == "mvn" ||
4548      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4549      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4550      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4551                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4552                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4553    CanAcceptCarrySet = true;
4554  } else
4555    CanAcceptCarrySet = false;
4556
4557  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4558      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4559      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4560      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4561      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4562      (Mnemonic == "clrex" && !isThumb()) ||
4563      (Mnemonic == "nop" && isThumbOne()) ||
4564      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4565        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4566        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4567      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4568       !isThumb()) ||
4569      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4570    CanAcceptPredicationCode = false;
4571  } else
4572    CanAcceptPredicationCode = true;
4573
4574  if (isThumb()) {
4575    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4576        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4577      CanAcceptPredicationCode = false;
4578  }
4579}
4580
/// Returns true when the parsed (defaulted, non-setting) cc_out operand at
/// Operands[1] must be dropped so the instruction can match a variant that
/// has no cc_out at all.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs  or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // No special case applies; keep the cc_out operand.
  return false;
}
4705
4706static bool isDataTypeToken(StringRef Tok) {
4707  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4708    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4709    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4710    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4711    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4712    Tok == ".f" || Tok == ".d";
4713}
4714
4715// FIXME: This bit should probably be handled via an explicit match class
4716// in the .td files that matches the suffix instead of having it be
4717// a literal string token the way it is now.
4718static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4719  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4720}
4721
4722static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4723/// Parse an arm instruction mnemonic followed by its operands.
4724bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4725                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Apply mnemonic aliases before doing anything else, as the destination
  // mnemonic may include suffixes and we want to handle them normally.
4728  // The generic tblgen'erated code does this later, at the start of
4729  // MatchInstructionImpl(), but that's too late for aliases that include
4730  // any sort of suffix.
4731  unsigned AvailableFeatures = getAvailableFeatures();
4732  applyMnemonicAliases(Name, AvailableFeatures);
4733
4734  // First check for the ARM-specific .req directive.
4735  if (Parser.getTok().is(AsmToken::Identifier) &&
4736      Parser.getTok().getIdentifier() == ".req") {
4737    parseDirectiveReq(Name, NameLoc);
4738    // We always return 'error' for this, as we're done with this
4739    // statement and don't need to match the 'instruction."
4740    return true;
4741  }
4742
4743  // Create the leading tokens for the mnemonic, split by '.' characters.
4744  size_t Start = 0, Next = Name.find('.');
4745  StringRef Mnemonic = Name.slice(Start, Next);
4746
4747  // Split out the predication code and carry setting flag from the mnemonic.
4748  unsigned PredicationCode;
4749  unsigned ProcessorIMod;
4750  bool CarrySetting;
4751  StringRef ITMask;
4752  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4753                           ProcessorIMod, ITMask);
4754
4755  // In Thumb1, only the branch (B) instruction can be predicated.
4756  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4757    Parser.EatToEndOfStatement();
4758    return Error(NameLoc, "conditional execution not supported in Thumb1");
4759  }
4760
4761  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4762
4763  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4764  // is the mask as it will be for the IT encoding if the conditional
4765  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
4766  // where the conditional bit0 is zero, the instruction post-processing
4767  // will adjust the mask accordingly.
4768  if (Mnemonic == "it") {
4769    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4770    if (ITMask.size() > 3) {
4771      Parser.EatToEndOfStatement();
4772      return Error(Loc, "too many conditions on IT instruction");
4773    }
4774    unsigned Mask = 8;
4775    for (unsigned i = ITMask.size(); i != 0; --i) {
4776      char pos = ITMask[i - 1];
4777      if (pos != 't' && pos != 'e') {
4778        Parser.EatToEndOfStatement();
4779        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4780      }
4781      Mask >>= 1;
4782      if (ITMask[i - 1] == 't')
4783        Mask |= 8;
4784    }
4785    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4786  }
4787
4788  // FIXME: This is all a pretty gross hack. We should automatically handle
4789  // optional operands like this via tblgen.
4790
4791  // Next, add the CCOut and ConditionCode operands, if needed.
4792  //
4793  // For mnemonics which can ever incorporate a carry setting bit or predication
4794  // code, our matching model involves us always generating CCOut and
4795  // ConditionCode operands to match the mnemonic "as written" and then we let
4796  // the matcher deal with finding the right instruction or generating an
4797  // appropriate error.
4798  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4799  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4800
4801  // If we had a carry-set on an instruction that can't do that, issue an
4802  // error.
4803  if (!CanAcceptCarrySet && CarrySetting) {
4804    Parser.EatToEndOfStatement();
4805    return Error(NameLoc, "instruction '" + Mnemonic +
4806                 "' can not set flags, but 's' suffix specified");
4807  }
4808  // If we had a predication code on an instruction that can't do that, issue an
4809  // error.
4810  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4811    Parser.EatToEndOfStatement();
4812    return Error(NameLoc, "instruction '" + Mnemonic +
4813                 "' is not predicable, but condition code specified");
4814  }
4815
4816  // Add the carry setting operand, if necessary.
4817  if (CanAcceptCarrySet) {
4818    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4819    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4820                                               Loc));
4821  }
4822
4823  // Add the predication code operand, if necessary.
4824  if (CanAcceptPredicationCode) {
4825    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4826                                      CarrySetting);
4827    Operands.push_back(ARMOperand::CreateCondCode(
4828                         ARMCC::CondCodes(PredicationCode), Loc));
4829  }
4830
4831  // Add the processor imod operand, if necessary.
4832  if (ProcessorIMod) {
4833    Operands.push_back(ARMOperand::CreateImm(
4834          MCConstantExpr::Create(ProcessorIMod, getContext()),
4835                                 NameLoc, NameLoc));
4836  }
4837
4838  // Add the remaining tokens in the mnemonic.
4839  while (Next != StringRef::npos) {
4840    Start = Next;
4841    Next = Name.find('.', Start + 1);
4842    StringRef ExtraToken = Name.slice(Start, Next);
4843
4844    // Some NEON instructions have an optional datatype suffix that is
4845    // completely ignored. Check for that.
4846    if (isDataTypeToken(ExtraToken) &&
4847        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4848      continue;
4849
4850    if (ExtraToken != ".n") {
4851      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4852      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4853    }
4854  }
4855
4856  // Read the remaining operands.
4857  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4858    // Read the first operand.
4859    if (parseOperand(Operands, Mnemonic)) {
4860      Parser.EatToEndOfStatement();
4861      return true;
4862    }
4863
4864    while (getLexer().is(AsmToken::Comma)) {
4865      Parser.Lex();  // Eat the comma.
4866
4867      // Parse and remember the operand.
4868      if (parseOperand(Operands, Mnemonic)) {
4869        Parser.EatToEndOfStatement();
4870        return true;
4871      }
4872    }
4873  }
4874
4875  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4876    SMLoc Loc = getLexer().getLoc();
4877    Parser.EatToEndOfStatement();
4878    return Error(Loc, "unexpected token in argument list");
4879  }
4880
4881  Parser.Lex(); // Consume the EndOfStatement
4882
4883  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4884  // do and don't have a cc_out optional-def operand. With some spot-checks
4885  // of the operand list, we can figure out which variant we're trying to
4886  // parse and adjust accordingly before actually matching. We shouldn't ever
4887  // try to remove a cc_out operand that was explicitly set on the the
4888  // mnemonic, of course (CarrySetting == true). Reason number #317 the
4889  // table driven matcher doesn't fit well with the ARM instruction set.
4890  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4891    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4892    Operands.erase(Operands.begin() + 1);
4893    delete Op;
4894  }
4895
4896  // ARM mode 'blx' need special handling, as the register operand version
4897  // is predicable, but the label operand version is not. So, we can't rely
4898  // on the Mnemonic based checking to correctly figure out when to put
4899  // a k_CondCode operand in the list. If we're trying to match the label
4900  // version, remove the k_CondCode operand here.
4901  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4902      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4903    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4904    Operands.erase(Operands.begin() + 1);
4905    delete Op;
4906  }
4907
4908  // The vector-compare-to-zero instructions have a literal token "#0" at
4909  // the end that comes to here as an immediate operand. Convert it to a
4910  // token to play nicely with the matcher.
4911  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4912      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4913      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4914    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4915    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4916    if (CE && CE->getValue() == 0) {
4917      Operands.erase(Operands.begin() + 5);
4918      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4919      delete Op;
4920    }
4921  }
4922  // VCMP{E} does the same thing, but with a different operand count.
4923  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4924      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4925    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4926    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4927    if (CE && CE->getValue() == 0) {
4928      Operands.erase(Operands.begin() + 4);
4929      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4930      delete Op;
4931    }
4932  }
4933  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4934  // end. Convert it to a token here. Take care not to convert those
4935  // that should hit the Thumb2 encoding.
4936  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4937      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4938      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4939      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4940    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4941    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4942    if (CE && CE->getValue() == 0 &&
4943        (isThumbOne() ||
4944         // The cc_out operand matches the IT block.
4945         ((inITBlock() != CarrySetting) &&
4946         // Neither register operand is a high register.
4947         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4948          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4949      Operands.erase(Operands.begin() + 5);
4950      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4951      delete Op;
4952    }
4953  }
4954
4955  return false;
4956}
4957
4958// Validate context-sensitive operand constraints.
4959
4960// return 'true' if register list contains non-low GPR registers,
4961// 'false' otherwise. If Reg is in the register list or is HiReg, set
4962// 'containsReg' to true.
4963static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4964                                 unsigned HiReg, bool &containsReg) {
4965  containsReg = false;
4966  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4967    unsigned OpReg = Inst.getOperand(i).getReg();
4968    if (OpReg == Reg)
4969      containsReg = true;
4970    // Anything other than a low register isn't legal here.
4971    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4972      return true;
4973  }
4974  return false;
4975}
4976
4977// Check if the specified regisgter is in the register list of the inst,
4978// starting at the indicated operand number.
4979static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4980  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4981    unsigned OpReg = Inst.getOperand(i).getReg();
4982    if (OpReg == Reg)
4983      return true;
4984  }
4985  return false;
4986}
4987
// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
extern const MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for Opcode directly in the tblgen'erated
// ARMInsts table (see FIXME above about going through MCInstrInfo).
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}
4997
// FIXME: We would really like to be able to tablegen'erate this.
//
// Validate context-sensitive constraints that the table-driven matcher
// cannot express: IT-block predication state, sequential register-pair
// requirements, bitfield width ranges, and Thumb register-list
// restrictions. Returns true (after emitting an Error) on violation.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable.  It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    // 'bit' selects between the IT condition and its inverse for this slot.
    // The first instruction in the block always uses the IT condition.
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1. (The writeback forms carry the base register
    // first, so Rt/Rt2 are operands 1 and 2 here.)
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}
5147
// Map a VST1/VST2 lane "Asm" pseudo-opcode (which encodes the data-type
// suffix into the opcode name) to the real instruction opcode, and report
// the D-register spacing of the list via 'Spacing': 1 for the d-register
// forms, 2 for the q-register forms.
static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
  case ARM::VST1LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
  case ARM::VST1LNdAsm_U8:
    Spacing = 1;
    return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16:
    Spacing = 1;
    return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
    Spacing = 1;
    return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
  case ARM::VST2LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
  case ARM::VST2LNqWB_fixed_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
  case ARM::VST2LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
  case ARM::VST2LNqWB_register_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
  case ARM::VST2LNdAsm_U8:
    Spacing = 1;
    return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16:
    Spacing = 1;
    return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
    Spacing = 1;
    return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
  case ARM::VST2LNqAsm_U16:
    Spacing = 2;
    return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
    Spacing = 2;
    return ARM::VST2LNq32;
  }
}
5278
5279static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
5280  switch(Opc) {
5281  default: assert(0 && "unexpected opcode!");
5282  // VLD1LN
5283  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5284  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5285  case ARM::VLD1LNdWB_fixed_Asm_U8:
5286    Spacing = 1;
5287    return ARM::VLD1LNd8_UPD;
5288  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5289  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5290  case ARM::VLD1LNdWB_fixed_Asm_U16:
5291    Spacing = 1;
5292    return ARM::VLD1LNd16_UPD;
5293  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5294  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5295  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5296    Spacing = 1;
5297    return ARM::VLD1LNd32_UPD;
5298  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5299  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5300  case ARM::VLD1LNdWB_register_Asm_U8:
5301    Spacing = 1;
5302    return ARM::VLD1LNd8_UPD;
5303  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5304  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5305  case ARM::VLD1LNdWB_register_Asm_U16:
5306    Spacing = 1;
5307    return ARM::VLD1LNd16_UPD;
5308  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5309  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5310  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5311    Spacing = 1;
5312    return ARM::VLD1LNd32_UPD;
5313  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5314  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5315  case ARM::VLD1LNdAsm_U8:
5316    Spacing = 1;
5317    return ARM::VLD1LNd8;
5318  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5319  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5320  case ARM::VLD1LNdAsm_U16:
5321    Spacing = 1;
5322    return ARM::VLD1LNd16;
5323  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5324  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5325  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5326    Spacing = 1;
5327    return ARM::VLD1LNd32;
5328
5329  // VLD2LN
5330  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5331  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5332  case ARM::VLD2LNdWB_fixed_Asm_U8:
5333    Spacing = 1;
5334    return ARM::VLD2LNd8_UPD;
5335  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5336  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5337  case ARM::VLD2LNdWB_fixed_Asm_U16:
5338    Spacing = 1;
5339    return ARM::VLD2LNd16_UPD;
5340  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5341  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5342  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5343    Spacing = 1;
5344    return ARM::VLD2LNd32_UPD;
5345  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5346  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5347  case ARM::VLD2LNqWB_fixed_Asm_U16:
5348    Spacing = 1;
5349    return ARM::VLD2LNq16_UPD;
5350  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
5351  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
5352  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
5353    Spacing = 2;
5354    return ARM::VLD2LNq32_UPD;
5355  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5356  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5357  case ARM::VLD2LNdWB_register_Asm_U8:
5358    Spacing = 1;
5359    return ARM::VLD2LNd8_UPD;
5360  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5361  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5362  case ARM::VLD2LNdWB_register_Asm_U16:
5363    Spacing = 1;
5364    return ARM::VLD2LNd16_UPD;
5365  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5366  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5367  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5368    Spacing = 1;
5369    return ARM::VLD2LNd32_UPD;
5370  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5371  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5372  case ARM::VLD2LNqWB_register_Asm_U16:
5373    Spacing = 2;
5374    return ARM::VLD2LNq16_UPD;
5375  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
5376  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
5377  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
5378    Spacing = 2;
5379    return ARM::VLD2LNq32_UPD;
5380  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5381  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5382  case ARM::VLD2LNdAsm_U8:
5383    Spacing = 1;
5384    return ARM::VLD2LNd8;
5385  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5386  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5387  case ARM::VLD2LNdAsm_U16:
5388    Spacing = 1;
5389    return ARM::VLD2LNd16;
5390  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5391  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5392  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5393    Spacing = 1;
5394    return ARM::VLD2LNd32;
5395  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5396  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
5397  case ARM::VLD2LNqAsm_U16:
5398    Spacing = 2;
5399    return ARM::VLD2LNq16;
5400  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
5401  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
5402  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
5403    Spacing = 2;
5404    return ARM::VLD2LNq32;
5405  }
5406}
5407
5408bool ARMAsmParser::
5409processInstruction(MCInst &Inst,
5410                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5411  switch (Inst.getOpcode()) {
5412  // Aliases for alternate PC+imm syntax of LDR instructions.
5413  case ARM::t2LDRpcrel:
5414    Inst.setOpcode(ARM::t2LDRpci);
5415    return true;
5416  case ARM::t2LDRBpcrel:
5417    Inst.setOpcode(ARM::t2LDRBpci);
5418    return true;
5419  case ARM::t2LDRHpcrel:
5420    Inst.setOpcode(ARM::t2LDRHpci);
5421    return true;
5422  case ARM::t2LDRSBpcrel:
5423    Inst.setOpcode(ARM::t2LDRSBpci);
5424    return true;
5425  case ARM::t2LDRSHpcrel:
5426    Inst.setOpcode(ARM::t2LDRSHpci);
5427    return true;
5428  // Handle NEON VST complex aliases.
5429  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5430  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5431  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5432  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5433  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5434  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5435  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5436  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5437    MCInst TmpInst;
5438    // Shuffle the operands around so the lane index operand is in the
5439    // right place.
5440    unsigned Spacing;
5441    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5442    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5443    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5444    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5445    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5446    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5447    TmpInst.addOperand(Inst.getOperand(1)); // lane
5448    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5449    TmpInst.addOperand(Inst.getOperand(6));
5450    Inst = TmpInst;
5451    return true;
5452  }
5453
5454  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
5455  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
5456  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
5457  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5458  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5459  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5460  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5461  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5462  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5463  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5464  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
5465  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
5466  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
5467  case ARM::VST2LNqWB_register_Asm_U32: {
5468    MCInst TmpInst;
5469    // Shuffle the operands around so the lane index operand is in the
5470    // right place.
5471    unsigned Spacing;
5472    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5473    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5474    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5475    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5476    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5477    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5478    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5479                                            Spacing));
5480    TmpInst.addOperand(Inst.getOperand(1)); // lane
5481    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5482    TmpInst.addOperand(Inst.getOperand(6));
5483    Inst = TmpInst;
5484    return true;
5485  }
5486  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5487  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5488  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5489  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5490  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5491  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5492  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5493  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5494    MCInst TmpInst;
5495    // Shuffle the operands around so the lane index operand is in the
5496    // right place.
5497    unsigned Spacing;
5498    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5499    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5500    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5501    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5502    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5503    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5504    TmpInst.addOperand(Inst.getOperand(1)); // lane
5505    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5506    TmpInst.addOperand(Inst.getOperand(5));
5507    Inst = TmpInst;
5508    return true;
5509  }
5510
5511  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
5512  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
5513  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
5514  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5515  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5516  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5517  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5518  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5519  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5520  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5521  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
5522  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
5523  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
5524  case ARM::VST2LNqWB_fixed_Asm_U32: {
5525    MCInst TmpInst;
5526    // Shuffle the operands around so the lane index operand is in the
5527    // right place.
5528    unsigned Spacing;
5529    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5530    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5531    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5532    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5533    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5534    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5535    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5536                                            Spacing));
5537    TmpInst.addOperand(Inst.getOperand(1)); // lane
5538    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5539    TmpInst.addOperand(Inst.getOperand(5));
5540    Inst = TmpInst;
5541    return true;
5542  }
5543  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5544  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5545  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5546  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5547  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5548  case ARM::VST1LNdAsm_U32: {
5549    MCInst TmpInst;
5550    // Shuffle the operands around so the lane index operand is in the
5551    // right place.
5552    unsigned Spacing;
5553    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5554    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5555    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5556    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5557    TmpInst.addOperand(Inst.getOperand(1)); // lane
5558    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5559    TmpInst.addOperand(Inst.getOperand(5));
5560    Inst = TmpInst;
5561    return true;
5562  }
5563
5564  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
5565  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
5566  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5567  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5568  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5569  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5570  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
5571  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
5572  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
5573    MCInst TmpInst;
5574    // Shuffle the operands around so the lane index operand is in the
5575    // right place.
5576    unsigned Spacing;
5577    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5578    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5579    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5580    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5581    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5582                                            Spacing));
5583    TmpInst.addOperand(Inst.getOperand(1)); // lane
5584    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5585    TmpInst.addOperand(Inst.getOperand(5));
5586    Inst = TmpInst;
5587    return true;
5588  }
5589  // Handle NEON VLD complex aliases.
5590  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5591  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5592  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5593  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5594  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5595  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5596  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5597  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5598    MCInst TmpInst;
5599    // Shuffle the operands around so the lane index operand is in the
5600    // right place.
5601    unsigned Spacing;
5602    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5603    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5604    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5605    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5606    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5607    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5608    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5609    TmpInst.addOperand(Inst.getOperand(1)); // lane
5610    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5611    TmpInst.addOperand(Inst.getOperand(6));
5612    Inst = TmpInst;
5613    return true;
5614  }
5615
5616  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
5617  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
5618  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
5619  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5620  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5621  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5622  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5623  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5624  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5625  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5626  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
5627  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
5628  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
5629  case ARM::VLD2LNqWB_register_Asm_U32: {
5630    MCInst TmpInst;
5631    // Shuffle the operands around so the lane index operand is in the
5632    // right place.
5633    unsigned Spacing;
5634    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5635    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5636    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5637                                            Spacing));
5638    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5639    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5640    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5641    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5642    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5643    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5644                                            Spacing));
5645    TmpInst.addOperand(Inst.getOperand(1)); // lane
5646    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5647    TmpInst.addOperand(Inst.getOperand(6));
5648    Inst = TmpInst;
5649    return true;
5650  }
5651
5652  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5653  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5654  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5655  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5656  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5657  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5658  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5659  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5660    MCInst TmpInst;
5661    // Shuffle the operands around so the lane index operand is in the
5662    // right place.
5663    unsigned Spacing;
5664    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5665    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5666    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5667    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5668    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5669    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5670    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5671    TmpInst.addOperand(Inst.getOperand(1)); // lane
5672    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5673    TmpInst.addOperand(Inst.getOperand(5));
5674    Inst = TmpInst;
5675    return true;
5676  }
5677
5678  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
5679  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
5680  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
5681  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5682  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5683  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5684  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5685  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5686  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5687  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5688  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
5689  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
5690  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
5691  case ARM::VLD2LNqWB_fixed_Asm_U32: {
5692    MCInst TmpInst;
5693    // Shuffle the operands around so the lane index operand is in the
5694    // right place.
5695    unsigned Spacing;
5696    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5697    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5698    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5699                                            Spacing));
5700    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5701    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5702    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5703    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5704    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5705    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5706                                            Spacing));
5707    TmpInst.addOperand(Inst.getOperand(1)); // lane
5708    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5709    TmpInst.addOperand(Inst.getOperand(5));
5710    Inst = TmpInst;
5711    return true;
5712  }
5713
5714  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
5715  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
5716  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5717  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5718  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5719  case ARM::VLD1LNdAsm_U32: {
5720    MCInst TmpInst;
5721    // Shuffle the operands around so the lane index operand is in the
5722    // right place.
5723    unsigned Spacing;
5724    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5725    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5726    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5727    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5728    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5729    TmpInst.addOperand(Inst.getOperand(1)); // lane
5730    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5731    TmpInst.addOperand(Inst.getOperand(5));
5732    Inst = TmpInst;
5733    return true;
5734  }
5735
5736  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
5737  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
5738  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5739  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5740  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5741  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5742  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
5743  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
5744  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
5745  case ARM::VLD2LNqAsm_U32: {
5746    MCInst TmpInst;
5747    // Shuffle the operands around so the lane index operand is in the
5748    // right place.
5749    unsigned Spacing;
5750    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5751    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5752    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5753                                            Spacing));
5754    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5755    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5756    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5757    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5758                                            Spacing));
5759    TmpInst.addOperand(Inst.getOperand(1)); // lane
5760    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5761    TmpInst.addOperand(Inst.getOperand(5));
5762    Inst = TmpInst;
5763    return true;
5764  }
5765  // Handle the Thumb2 mode MOV complex aliases.
5766  case ARM::t2MOVsr:
5767  case ARM::t2MOVSsr: {
5768    // Which instruction to expand to depends on the CCOut operand and
5769    // whether we're in an IT block if the register operands are low
5770    // registers.
5771    bool isNarrow = false;
5772    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5773        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5774        isARMLowRegister(Inst.getOperand(2).getReg()) &&
5775        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
5776        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
5777      isNarrow = true;
5778    MCInst TmpInst;
5779    unsigned newOpc;
5780    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
5781    default: llvm_unreachable("unexpected opcode!");
5782    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
5783    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
5784    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
5785    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
5786    }
5787    TmpInst.setOpcode(newOpc);
5788    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5789    if (isNarrow)
5790      TmpInst.addOperand(MCOperand::CreateReg(
5791          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5792    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5793    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5794    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5795    TmpInst.addOperand(Inst.getOperand(5));
5796    if (!isNarrow)
5797      TmpInst.addOperand(MCOperand::CreateReg(
5798          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5799    Inst = TmpInst;
5800    return true;
5801  }
5802  case ARM::t2MOVsi:
5803  case ARM::t2MOVSsi: {
5804    // Which instruction to expand to depends on the CCOut operand and
5805    // whether we're in an IT block if the register operands are low
5806    // registers.
5807    bool isNarrow = false;
5808    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5809        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5810        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5811      isNarrow = true;
5812    MCInst TmpInst;
5813    unsigned newOpc;
5814    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5815    default: llvm_unreachable("unexpected opcode!");
5816    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5817    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5818    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5819    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5820    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
5821    }
5822    unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5823    if (Ammount == 32) Ammount = 0;
5824    TmpInst.setOpcode(newOpc);
5825    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5826    if (isNarrow)
5827      TmpInst.addOperand(MCOperand::CreateReg(
5828          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5829    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5830    if (newOpc != ARM::t2RRX)
5831      TmpInst.addOperand(MCOperand::CreateImm(Ammount));
5832    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5833    TmpInst.addOperand(Inst.getOperand(4));
5834    if (!isNarrow)
5835      TmpInst.addOperand(MCOperand::CreateReg(
5836          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5837    Inst = TmpInst;
5838    return true;
5839  }
5840  // Handle the ARM mode MOV complex aliases.
5841  case ARM::ASRr:
5842  case ARM::LSRr:
5843  case ARM::LSLr:
5844  case ARM::RORr: {
5845    ARM_AM::ShiftOpc ShiftTy;
5846    switch(Inst.getOpcode()) {
5847    default: llvm_unreachable("unexpected opcode!");
5848    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5849    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5850    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5851    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5852    }
5853    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5854    MCInst TmpInst;
5855    TmpInst.setOpcode(ARM::MOVsr);
5856    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5857    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5858    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5859    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5860    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5861    TmpInst.addOperand(Inst.getOperand(4));
5862    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5863    Inst = TmpInst;
5864    return true;
5865  }
5866  case ARM::ASRi:
5867  case ARM::LSRi:
5868  case ARM::LSLi:
5869  case ARM::RORi: {
5870    ARM_AM::ShiftOpc ShiftTy;
5871    switch(Inst.getOpcode()) {
5872    default: llvm_unreachable("unexpected opcode!");
5873    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5874    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5875    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5876    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5877    }
5878    // A shift by zero is a plain MOVr, not a MOVsi.
5879    unsigned Amt = Inst.getOperand(2).getImm();
5880    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5881    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5882    MCInst TmpInst;
5883    TmpInst.setOpcode(Opc);
5884    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5885    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5886    if (Opc == ARM::MOVsi)
5887      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5888    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5889    TmpInst.addOperand(Inst.getOperand(4));
5890    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5891    Inst = TmpInst;
5892    return true;
5893  }
5894  case ARM::RRXi: {
5895    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5896    MCInst TmpInst;
5897    TmpInst.setOpcode(ARM::MOVsi);
5898    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5899    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5900    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5901    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5902    TmpInst.addOperand(Inst.getOperand(3));
5903    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5904    Inst = TmpInst;
5905    return true;
5906  }
5907  case ARM::t2LDMIA_UPD: {
5908    // If this is a load of a single register, then we should use
5909    // a post-indexed LDR instruction instead, per the ARM ARM.
5910    if (Inst.getNumOperands() != 5)
5911      return false;
5912    MCInst TmpInst;
5913    TmpInst.setOpcode(ARM::t2LDR_POST);
5914    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5915    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5916    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5917    TmpInst.addOperand(MCOperand::CreateImm(4));
5918    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5919    TmpInst.addOperand(Inst.getOperand(3));
5920    Inst = TmpInst;
5921    return true;
5922  }
5923  case ARM::t2STMDB_UPD: {
5924    // If this is a store of a single register, then we should use
5925    // a pre-indexed STR instruction instead, per the ARM ARM.
5926    if (Inst.getNumOperands() != 5)
5927      return false;
5928    MCInst TmpInst;
5929    TmpInst.setOpcode(ARM::t2STR_PRE);
5930    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5931    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5932    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5933    TmpInst.addOperand(MCOperand::CreateImm(-4));
5934    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5935    TmpInst.addOperand(Inst.getOperand(3));
5936    Inst = TmpInst;
5937    return true;
5938  }
5939  case ARM::LDMIA_UPD:
5940    // If this is a load of a single register via a 'pop', then we should use
5941    // a post-indexed LDR instruction instead, per the ARM ARM.
5942    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5943        Inst.getNumOperands() == 5) {
5944      MCInst TmpInst;
5945      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5946      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5947      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5948      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5949      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5950      TmpInst.addOperand(MCOperand::CreateImm(4));
5951      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5952      TmpInst.addOperand(Inst.getOperand(3));
5953      Inst = TmpInst;
5954      return true;
5955    }
5956    break;
5957  case ARM::STMDB_UPD:
5958    // If this is a store of a single register via a 'push', then we should use
5959    // a pre-indexed STR instruction instead, per the ARM ARM.
5960    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5961        Inst.getNumOperands() == 5) {
5962      MCInst TmpInst;
5963      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5964      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5965      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5966      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5967      TmpInst.addOperand(MCOperand::CreateImm(-4));
5968      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5969      TmpInst.addOperand(Inst.getOperand(3));
5970      Inst = TmpInst;
5971    }
5972    break;
5973  case ARM::t2ADDri12:
5974    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5975    // mnemonic was used (not "addw"), encoding T3 is preferred.
5976    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5977        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5978      break;
5979    Inst.setOpcode(ARM::t2ADDri);
5980    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5981    break;
5982  case ARM::t2SUBri12:
5983    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5984    // mnemonic was used (not "subw"), encoding T3 is preferred.
5985    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5986        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5987      break;
5988    Inst.setOpcode(ARM::t2SUBri);
5989    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5990    break;
5991  case ARM::tADDi8:
5992    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5993    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5994    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5995    // to encoding T1 if <Rd> is omitted."
5996    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5997      Inst.setOpcode(ARM::tADDi3);
5998      return true;
5999    }
6000    break;
6001  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6003    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6004    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6005    // to encoding T1 if <Rd> is omitted."
6006    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6007      Inst.setOpcode(ARM::tSUBi3);
6008      return true;
6009    }
6010    break;
6011  case ARM::t2ADDrr: {
6012    // If the destination and first source operand are the same, and
6013    // there's no setting of the flags, use encoding T2 instead of T3.
6014    // Note that this is only for ADD, not SUB. This mirrors the system
6015    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
6016    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6017        Inst.getOperand(5).getReg() != 0 ||
6018        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6019         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6020      break;
6021    MCInst TmpInst;
6022    TmpInst.setOpcode(ARM::tADDhirr);
6023    TmpInst.addOperand(Inst.getOperand(0));
6024    TmpInst.addOperand(Inst.getOperand(0));
6025    TmpInst.addOperand(Inst.getOperand(2));
6026    TmpInst.addOperand(Inst.getOperand(3));
6027    TmpInst.addOperand(Inst.getOperand(4));
6028    Inst = TmpInst;
6029    return true;
6030  }
6031  case ARM::tB:
6032    // A Thumb conditional branch outside of an IT block is a tBcc.
6033    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6034      Inst.setOpcode(ARM::tBcc);
6035      return true;
6036    }
6037    break;
6038  case ARM::t2B:
6039    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6040    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6041      Inst.setOpcode(ARM::t2Bcc);
6042      return true;
6043    }
6044    break;
6045  case ARM::t2Bcc:
6046    // If the conditional is AL or we're in an IT block, we really want t2B.
6047    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6048      Inst.setOpcode(ARM::t2B);
6049      return true;
6050    }
6051    break;
6052  case ARM::tBcc:
6053    // If the conditional is AL, we really want tB.
6054    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6055      Inst.setOpcode(ARM::tB);
6056      return true;
6057    }
6058    break;
6059  case ARM::tLDMIA: {
6060    // If the register list contains any high registers, or if the writeback
6061    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6062    // instead if we're in Thumb2. Otherwise, this should have generated
6063    // an error in validateInstruction().
6064    unsigned Rn = Inst.getOperand(0).getReg();
6065    bool hasWritebackToken =
6066      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6067       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6068    bool listContainsBase;
6069    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6070        (!listContainsBase && !hasWritebackToken) ||
6071        (listContainsBase && hasWritebackToken)) {
6072      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6073      assert (isThumbTwo());
6074      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6075      // If we're switching to the updating version, we need to insert
6076      // the writeback tied operand.
6077      if (hasWritebackToken)
6078        Inst.insert(Inst.begin(),
6079                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6080      return true;
6081    }
6082    break;
6083  }
6084  case ARM::tSTMIA_UPD: {
6085    // If the register list contains any high registers, we need to use
6086    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6087    // should have generated an error in validateInstruction().
6088    unsigned Rn = Inst.getOperand(0).getReg();
6089    bool listContainsBase;
6090    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6091      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6092      assert (isThumbTwo());
6093      Inst.setOpcode(ARM::t2STMIA_UPD);
6094      return true;
6095    }
6096    break;
6097  }
6098  case ARM::tPOP: {
6099    bool listContainsBase;
6100    // If the register list contains any high registers, we need to use
6101    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6102    // should have generated an error in validateInstruction().
6103    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6104      return false;
6105    assert (isThumbTwo());
6106    Inst.setOpcode(ARM::t2LDMIA_UPD);
6107    // Add the base register and writeback operands.
6108    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6109    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6110    return true;
6111  }
6112  case ARM::tPUSH: {
6113    bool listContainsBase;
6114    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6115      return false;
6116    assert (isThumbTwo());
6117    Inst.setOpcode(ARM::t2STMDB_UPD);
6118    // Add the base register and writeback operands.
6119    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6120    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6121    return true;
6122  }
6123  case ARM::t2MOVi: {
6124    // If we can use the 16-bit encoding and the user didn't explicitly
6125    // request the 32-bit variant, transform it here.
6126    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6127        Inst.getOperand(1).getImm() <= 255 &&
6128        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6129         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6130        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6131        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6132         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6133      // The operands aren't in the same order for tMOVi8...
6134      MCInst TmpInst;
6135      TmpInst.setOpcode(ARM::tMOVi8);
6136      TmpInst.addOperand(Inst.getOperand(0));
6137      TmpInst.addOperand(Inst.getOperand(4));
6138      TmpInst.addOperand(Inst.getOperand(1));
6139      TmpInst.addOperand(Inst.getOperand(2));
6140      TmpInst.addOperand(Inst.getOperand(3));
6141      Inst = TmpInst;
6142      return true;
6143    }
6144    break;
6145  }
6146  case ARM::t2MOVr: {
6147    // If we can use the 16-bit encoding and the user didn't explicitly
6148    // request the 32-bit variant, transform it here.
6149    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6150        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6151        Inst.getOperand(2).getImm() == ARMCC::AL &&
6152        Inst.getOperand(4).getReg() == ARM::CPSR &&
6153        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6154         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6155      // The operands aren't the same for tMOV[S]r... (no cc_out)
6156      MCInst TmpInst;
6157      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6158      TmpInst.addOperand(Inst.getOperand(0));
6159      TmpInst.addOperand(Inst.getOperand(1));
6160      TmpInst.addOperand(Inst.getOperand(2));
6161      TmpInst.addOperand(Inst.getOperand(3));
6162      Inst = TmpInst;
6163      return true;
6164    }
6165    break;
6166  }
6167  case ARM::t2SXTH:
6168  case ARM::t2SXTB:
6169  case ARM::t2UXTH:
6170  case ARM::t2UXTB: {
6171    // If we can use the 16-bit encoding and the user didn't explicitly
6172    // request the 32-bit variant, transform it here.
6173    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6174        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6175        Inst.getOperand(2).getImm() == 0 &&
6176        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6177         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6178      unsigned NewOpc;
6179      switch (Inst.getOpcode()) {
6180      default: llvm_unreachable("Illegal opcode!");
6181      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6182      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6183      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6184      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6185      }
6186      // The operands aren't the same for thumb1 (no rotate operand).
6187      MCInst TmpInst;
6188      TmpInst.setOpcode(NewOpc);
6189      TmpInst.addOperand(Inst.getOperand(0));
6190      TmpInst.addOperand(Inst.getOperand(1));
6191      TmpInst.addOperand(Inst.getOperand(3));
6192      TmpInst.addOperand(Inst.getOperand(4));
6193      Inst = TmpInst;
6194      return true;
6195    }
6196    break;
6197  }
6198  case ARM::MOVsi: {
6199    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6200    if (SOpc == ARM_AM::rrx) return false;
6201    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6202      // Shifting by zero is accepted as a vanilla 'MOVr'
6203      MCInst TmpInst;
6204      TmpInst.setOpcode(ARM::MOVr);
6205      TmpInst.addOperand(Inst.getOperand(0));
6206      TmpInst.addOperand(Inst.getOperand(1));
6207      TmpInst.addOperand(Inst.getOperand(3));
6208      TmpInst.addOperand(Inst.getOperand(4));
6209      TmpInst.addOperand(Inst.getOperand(5));
6210      Inst = TmpInst;
6211      return true;
6212    }
6213    return false;
6214  }
6215  case ARM::ANDrsi:
6216  case ARM::ORRrsi:
6217  case ARM::EORrsi:
6218  case ARM::BICrsi:
6219  case ARM::SUBrsi:
6220  case ARM::ADDrsi: {
6221    unsigned newOpc;
6222    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
6223    if (SOpc == ARM_AM::rrx) return false;
6224    switch (Inst.getOpcode()) {
6225    default: assert(0 && "unexpected opcode!");
6226    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
6227    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
6228    case ARM::EORrsi: newOpc = ARM::EORrr; break;
6229    case ARM::BICrsi: newOpc = ARM::BICrr; break;
6230    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
6231    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
6232    }
6233    // If the shift is by zero, use the non-shifted instruction definition.
6234    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
6235      MCInst TmpInst;
6236      TmpInst.setOpcode(newOpc);
6237      TmpInst.addOperand(Inst.getOperand(0));
6238      TmpInst.addOperand(Inst.getOperand(1));
6239      TmpInst.addOperand(Inst.getOperand(2));
6240      TmpInst.addOperand(Inst.getOperand(4));
6241      TmpInst.addOperand(Inst.getOperand(5));
6242      TmpInst.addOperand(Inst.getOperand(6));
6243      Inst = TmpInst;
6244      return true;
6245    }
6246    return false;
6247  }
6248  case ARM::t2IT: {
6249    // The mask bits for all but the first condition are represented as
6250    // the low bit of the condition code value implies 't'. We currently
6251    // always have 1 implies 't', so XOR toggle the bits if the low bit
6252    // of the condition code is zero. The encoding also expects the low
6253    // bit of the condition to be encoded as bit 4 of the mask operand,
6254    // so mask that in if needed
6255    MCOperand &MO = Inst.getOperand(1);
6256    unsigned Mask = MO.getImm();
6257    unsigned OrigMask = Mask;
6258    unsigned TZ = CountTrailingZeros_32(Mask);
6259    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6260      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6261      for (unsigned i = 3; i != TZ; --i)
6262        Mask ^= 1 << i;
6263    } else
6264      Mask |= 0x10;
6265    MO.setImm(Mask);
6266
6267    // Set up the IT block state according to the IT instruction we just
6268    // matched.
6269    assert(!inITBlock() && "nested IT blocks?!");
6270    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6271    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6272    ITState.CurPosition = 0;
6273    ITState.FirstCond = true;
6274    break;
6275  }
6276  }
6277  return false;
6278}
6279
6280unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6281  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6282  // suffix depending on whether they're in an IT block or not.
6283  unsigned Opc = Inst.getOpcode();
6284  const MCInstrDesc &MCID = getInstDesc(Opc);
6285  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6286    assert(MCID.hasOptionalDef() &&
6287           "optionally flag setting instruction missing optional def operand");
6288    assert(MCID.NumOperands == Inst.getNumOperands() &&
6289           "operand count mismatch!");
6290    // Find the optional-def operand (cc_out).
6291    unsigned OpNo;
6292    for (OpNo = 0;
6293         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
6294         ++OpNo)
6295      ;
6296    // If we're parsing Thumb1, reject it completely.
6297    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6298      return Match_MnemonicFail;
6299    // If we're parsing Thumb2, which form is legal depends on whether we're
6300    // in an IT block.
6301    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6302        !inITBlock())
6303      return Match_RequiresITBlock;
6304    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6305        inITBlock())
6306      return Match_RequiresNotITBlock;
6307  }
6308  // Some high-register supporting Thumb1 encodings only allow both registers
6309  // to be from r0-r7 when in Thumb2.
6310  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6311           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6312           isARMLowRegister(Inst.getOperand(2).getReg()))
6313    return Match_RequiresThumb2;
6314  // Others only require ARMv6 or later.
6315  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6316           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6317           isARMLowRegister(Inst.getOperand(1).getReg()))
6318    return Match_RequiresV6;
6319  return Match_Success;
6320}
6321
/// MatchAndEmitInstruction - Match the parsed operand list against the
/// generated instruction tables, run target-specific validation and
/// post-processing, and emit the resulting MCInst to the streamer.
/// Returns true on error (a diagnostic has already been reported) and
/// false on successful emission.
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                        MCStreamer &Out) {
  MCInst Inst;
  unsigned ErrorInfo;
  unsigned MatchResult;
  // ErrorInfo is set by the matcher to the index of the offending operand
  // (or ~0U if unknown) when the match fails.
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
  switch (MatchResult) {
  default: break;
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other. E.g.,
    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
    while (processInstruction(Inst, Operands))
      ;

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // Point the diagnostic at the bad operand when the matcher told us
    // which one it was; otherwise fall back to the instruction location.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}
6389
6390/// parseDirective parses the arm specific directives
6391bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6392  StringRef IDVal = DirectiveID.getIdentifier();
6393  if (IDVal == ".word")
6394    return parseDirectiveWord(4, DirectiveID.getLoc());
6395  else if (IDVal == ".thumb")
6396    return parseDirectiveThumb(DirectiveID.getLoc());
6397  else if (IDVal == ".arm")
6398    return parseDirectiveARM(DirectiveID.getLoc());
6399  else if (IDVal == ".thumb_func")
6400    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6401  else if (IDVal == ".code")
6402    return parseDirectiveCode(DirectiveID.getLoc());
6403  else if (IDVal == ".syntax")
6404    return parseDirectiveSyntax(DirectiveID.getLoc());
6405  else if (IDVal == ".unreq")
6406    return parseDirectiveUnreq(DirectiveID.getLoc());
6407  else if (IDVal == ".arch")
6408    return parseDirectiveArch(DirectiveID.getLoc());
6409  else if (IDVal == ".eabi_attribute")
6410    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6411  return true;
6412}
6413
6414/// parseDirectiveWord
6415///  ::= .word [ expression (, expression)* ]
6416bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6417  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6418    for (;;) {
6419      const MCExpr *Value;
6420      if (getParser().ParseExpression(Value))
6421        return true;
6422
6423      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6424
6425      if (getLexer().is(AsmToken::EndOfStatement))
6426        break;
6427
6428      // FIXME: Improve diagnostic.
6429      if (getLexer().isNot(AsmToken::Comma))
6430        return Error(L, "unexpected token in directive");
6431      Parser.Lex();
6432    }
6433  }
6434
6435  Parser.Lex();
6436  return false;
6437}
6438
6439/// parseDirectiveThumb
6440///  ::= .thumb
6441bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6442  if (getLexer().isNot(AsmToken::EndOfStatement))
6443    return Error(L, "unexpected token in directive");
6444  Parser.Lex();
6445
6446  if (!isThumb())
6447    SwitchMode();
6448  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6449  return false;
6450}
6451
6452/// parseDirectiveARM
6453///  ::= .arm
6454bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6455  if (getLexer().isNot(AsmToken::EndOfStatement))
6456    return Error(L, "unexpected token in directive");
6457  Parser.Lex();
6458
6459  if (isThumb())
6460    SwitchMode();
6461  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6462  return false;
6463}
6464
6465/// parseDirectiveThumbFunc
6466///  ::= .thumbfunc symbol_name
6467bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6468  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6469  bool isMachO = MAI.hasSubsectionsViaSymbols();
6470  StringRef Name;
6471  bool needFuncName = true;
6472
6473  // Darwin asm has (optionally) function name after .thumb_func direction
6474  // ELF doesn't
6475  if (isMachO) {
6476    const AsmToken &Tok = Parser.getTok();
6477    if (Tok.isNot(AsmToken::EndOfStatement)) {
6478      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6479        return Error(L, "unexpected token in .thumb_func directive");
6480      Name = Tok.getIdentifier();
6481      Parser.Lex(); // Consume the identifier token.
6482      needFuncName = false;
6483    }
6484  }
6485
6486  if (getLexer().isNot(AsmToken::EndOfStatement))
6487    return Error(L, "unexpected token in directive");
6488
6489  // Eat the end of statement and any blank lines that follow.
6490  while (getLexer().is(AsmToken::EndOfStatement))
6491    Parser.Lex();
6492
6493  // FIXME: assuming function name will be the line following .thumb_func
6494  // We really should be checking the next symbol definition even if there's
6495  // stuff in between.
6496  if (needFuncName) {
6497    Name = Parser.getTok().getIdentifier();
6498  }
6499
6500  // Mark symbol as a thumb symbol.
6501  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6502  getParser().getStreamer().EmitThumbFunc(Func);
6503  return false;
6504}
6505
6506/// parseDirectiveSyntax
6507///  ::= .syntax unified | divided
6508bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6509  const AsmToken &Tok = Parser.getTok();
6510  if (Tok.isNot(AsmToken::Identifier))
6511    return Error(L, "unexpected token in .syntax directive");
6512  StringRef Mode = Tok.getString();
6513  if (Mode == "unified" || Mode == "UNIFIED")
6514    Parser.Lex();
6515  else if (Mode == "divided" || Mode == "DIVIDED")
6516    return Error(L, "'.syntax divided' arm asssembly not supported");
6517  else
6518    return Error(L, "unrecognized syntax mode in .syntax directive");
6519
6520  if (getLexer().isNot(AsmToken::EndOfStatement))
6521    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6522  Parser.Lex();
6523
6524  // TODO tell the MC streamer the mode
6525  // getParser().getStreamer().Emit???();
6526  return false;
6527}
6528
6529/// parseDirectiveCode
6530///  ::= .code 16 | 32
6531bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6532  const AsmToken &Tok = Parser.getTok();
6533  if (Tok.isNot(AsmToken::Integer))
6534    return Error(L, "unexpected token in .code directive");
6535  int64_t Val = Parser.getTok().getIntVal();
6536  if (Val == 16)
6537    Parser.Lex();
6538  else if (Val == 32)
6539    Parser.Lex();
6540  else
6541    return Error(L, "invalid operand to .code directive");
6542
6543  if (getLexer().isNot(AsmToken::EndOfStatement))
6544    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6545  Parser.Lex();
6546
6547  if (Val == 16) {
6548    if (!isThumb())
6549      SwitchMode();
6550    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6551  } else {
6552    if (isThumb())
6553      SwitchMode();
6554    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6555  }
6556
6557  return false;
6558}
6559
6560/// parseDirectiveReq
6561///  ::= name .req registername
6562bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6563  Parser.Lex(); // Eat the '.req' token.
6564  unsigned Reg;
6565  SMLoc SRegLoc, ERegLoc;
6566  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6567    Parser.EatToEndOfStatement();
6568    return Error(SRegLoc, "register name expected");
6569  }
6570
6571  // Shouldn't be anything else.
6572  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6573    Parser.EatToEndOfStatement();
6574    return Error(Parser.getTok().getLoc(),
6575                 "unexpected input in .req directive.");
6576  }
6577
6578  Parser.Lex(); // Consume the EndOfStatement
6579
6580  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6581    return Error(SRegLoc, "redefinition of '" + Name +
6582                          "' does not match original.");
6583
6584  return false;
6585}
6586
6587/// parseDirectiveUneq
6588///  ::= .unreq registername
6589bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6590  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6591    Parser.EatToEndOfStatement();
6592    return Error(L, "unexpected input in .unreq directive.");
6593  }
6594  RegisterReqs.erase(Parser.getTok().getIdentifier());
6595  Parser.Lex(); // Eat the identifier.
6596  return false;
6597}
6598
/// parseDirectiveArch
///  ::= .arch token
/// Unimplemented stub: returning true reports the directive as an error /
/// unhandled to the generic parser.
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
  return true;
}
6604
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int
/// Unimplemented stub: returning true reports the directive as an error /
/// unhandled to the generic parser.
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  return true;
}
6610
// Forward declaration of the lexer initializer defined elsewhere.
extern "C" void LLVMInitializeARMAsmLexer();

/// Force static initialization.
/// Registers this asm parser for both the ARM and Thumb targets and pulls
/// in the ARM asm lexer so the linker keeps it.
extern "C" void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}
6619
6620#define GET_REGISTER_MATCHER
6621#define GET_MATCHER_IMPLEMENTATION
6622#include "ARMGenAsmMatcher.inc"
6623