ARMAsmParser.cpp revision de626ad8726677328e10dbdc15011254214437d7
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registers via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // First instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an ITBlock to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_FPImmediate,
274    k_MemBarrierOpt,
275    k_Memory,
276    k_PostIndexRegister,
277    k_MSRMask,
278    k_ProcIFlags,
279    k_VectorIndex,
280    k_Register,
281    k_RegisterList,
282    k_DPRRegisterList,
283    k_SPRRegisterList,
284    k_VectorList,
285    k_VectorListAllLanes,
286    k_VectorListIndexed,
287    k_ShiftedRegister,
288    k_ShiftedImmediate,
289    k_ShifterImmediate,
290    k_RotateImmediate,
291    k_BitfieldDescriptor,
292    k_Token
293  } Kind;
294
295  SMLoc StartLoc, EndLoc;
296  SmallVector<unsigned, 8> Registers;
297
298  union {
299    struct {
300      ARMCC::CondCodes Val;
301    } CC;
302
303    struct {
304      unsigned Val;
305    } Cop;
306
307    struct {
308      unsigned Val;
309    } CoprocOption;
310
311    struct {
312      unsigned Mask:4;
313    } ITMask;
314
315    struct {
316      ARM_MB::MemBOpt Val;
317    } MBOpt;
318
319    struct {
320      ARM_PROC::IFlags Val;
321    } IFlags;
322
323    struct {
324      unsigned Val;
325    } MMask;
326
327    struct {
328      const char *Data;
329      unsigned Length;
330    } Tok;
331
332    struct {
333      unsigned RegNum;
334    } Reg;
335
336    // A vector register list is a sequential list of 1 to 4 registers.
337    struct {
338      unsigned RegNum;
339      unsigned Count;
340      unsigned LaneIndex;
341      bool isDoubleSpaced;
342    } VectorList;
343
344    struct {
345      unsigned Val;
346    } VectorIndex;
347
348    struct {
349      const MCExpr *Val;
350    } Imm;
351
352    struct {
353      unsigned Val;       // encoded 8-bit representation
354    } FPImm;
355
356    /// Combined record for all forms of ARM address expressions.
357    struct {
358      unsigned BaseRegNum;
359      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
360      // was specified.
361      const MCConstantExpr *OffsetImm;  // Offset immediate value
362      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
363      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
364      unsigned ShiftImm;        // shift for OffsetReg.
365      unsigned Alignment;       // 0 = no alignment specified
366                                // n = alignment in bytes (2, 4, 8, 16, or 32)
367      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
368    } Memory;
369
370    struct {
371      unsigned RegNum;
372      bool isAdd;
373      ARM_AM::ShiftOpc ShiftTy;
374      unsigned ShiftImm;
375    } PostIdxReg;
376
377    struct {
378      bool isASR;
379      unsigned Imm;
380    } ShifterImm;
381    struct {
382      ARM_AM::ShiftOpc ShiftTy;
383      unsigned SrcReg;
384      unsigned ShiftReg;
385      unsigned ShiftImm;
386    } RegShiftedReg;
387    struct {
388      ARM_AM::ShiftOpc ShiftTy;
389      unsigned SrcReg;
390      unsigned ShiftImm;
391    } RegShiftedImm;
392    struct {
393      unsigned Imm;
394    } RotImm;
395    struct {
396      unsigned LSB;
397      unsigned Width;
398    } Bitfield;
399  };
400
401  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
402public:
403  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
404    Kind = o.Kind;
405    StartLoc = o.StartLoc;
406    EndLoc = o.EndLoc;
407    switch (Kind) {
408    case k_CondCode:
409      CC = o.CC;
410      break;
411    case k_ITCondMask:
412      ITMask = o.ITMask;
413      break;
414    case k_Token:
415      Tok = o.Tok;
416      break;
417    case k_CCOut:
418    case k_Register:
419      Reg = o.Reg;
420      break;
421    case k_RegisterList:
422    case k_DPRRegisterList:
423    case k_SPRRegisterList:
424      Registers = o.Registers;
425      break;
426    case k_VectorList:
427    case k_VectorListAllLanes:
428    case k_VectorListIndexed:
429      VectorList = o.VectorList;
430      break;
431    case k_CoprocNum:
432    case k_CoprocReg:
433      Cop = o.Cop;
434      break;
435    case k_CoprocOption:
436      CoprocOption = o.CoprocOption;
437      break;
438    case k_Immediate:
439      Imm = o.Imm;
440      break;
441    case k_FPImmediate:
442      FPImm = o.FPImm;
443      break;
444    case k_MemBarrierOpt:
445      MBOpt = o.MBOpt;
446      break;
447    case k_Memory:
448      Memory = o.Memory;
449      break;
450    case k_PostIndexRegister:
451      PostIdxReg = o.PostIdxReg;
452      break;
453    case k_MSRMask:
454      MMask = o.MMask;
455      break;
456    case k_ProcIFlags:
457      IFlags = o.IFlags;
458      break;
459    case k_ShifterImmediate:
460      ShifterImm = o.ShifterImm;
461      break;
462    case k_ShiftedRegister:
463      RegShiftedReg = o.RegShiftedReg;
464      break;
465    case k_ShiftedImmediate:
466      RegShiftedImm = o.RegShiftedImm;
467      break;
468    case k_RotateImmediate:
469      RotImm = o.RotImm;
470      break;
471    case k_BitfieldDescriptor:
472      Bitfield = o.Bitfield;
473      break;
474    case k_VectorIndex:
475      VectorIndex = o.VectorIndex;
476      break;
477    }
478  }
479
480  /// getStartLoc - Get the location of the first token of this operand.
481  SMLoc getStartLoc() const { return StartLoc; }
482  /// getEndLoc - Get the location of the last token of this operand.
483  SMLoc getEndLoc() const { return EndLoc; }
484
485  ARMCC::CondCodes getCondCode() const {
486    assert(Kind == k_CondCode && "Invalid access!");
487    return CC.Val;
488  }
489
490  unsigned getCoproc() const {
491    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
492    return Cop.Val;
493  }
494
495  StringRef getToken() const {
496    assert(Kind == k_Token && "Invalid access!");
497    return StringRef(Tok.Data, Tok.Length);
498  }
499
500  unsigned getReg() const {
501    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
502    return Reg.RegNum;
503  }
504
505  const SmallVectorImpl<unsigned> &getRegList() const {
506    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
507            Kind == k_SPRRegisterList) && "Invalid access!");
508    return Registers;
509  }
510
511  const MCExpr *getImm() const {
512    assert(Kind == k_Immediate && "Invalid access!");
513    return Imm.Val;
514  }
515
516  unsigned getFPImm() const {
517    assert(Kind == k_FPImmediate && "Invalid access!");
518    return FPImm.Val;
519  }
520
521  unsigned getVectorIndex() const {
522    assert(Kind == k_VectorIndex && "Invalid access!");
523    return VectorIndex.Val;
524  }
525
526  ARM_MB::MemBOpt getMemBarrierOpt() const {
527    assert(Kind == k_MemBarrierOpt && "Invalid access!");
528    return MBOpt.Val;
529  }
530
531  ARM_PROC::IFlags getProcIFlags() const {
532    assert(Kind == k_ProcIFlags && "Invalid access!");
533    return IFlags.Val;
534  }
535
536  unsigned getMSRMask() const {
537    assert(Kind == k_MSRMask && "Invalid access!");
538    return MMask.Val;
539  }
540
541  bool isCoprocNum() const { return Kind == k_CoprocNum; }
542  bool isCoprocReg() const { return Kind == k_CoprocReg; }
543  bool isCoprocOption() const { return Kind == k_CoprocOption; }
544  bool isCondCode() const { return Kind == k_CondCode; }
545  bool isCCOut() const { return Kind == k_CCOut; }
546  bool isITMask() const { return Kind == k_ITCondMask; }
547  bool isITCondCode() const { return Kind == k_CondCode; }
548  bool isImm() const { return Kind == k_Immediate; }
549  bool isFPImm() const { return Kind == k_FPImmediate; }
550  bool isImm8s4() const {
551    if (Kind != k_Immediate)
552      return false;
553    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
554    if (!CE) return false;
555    int64_t Value = CE->getValue();
556    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
557  }
558  bool isImm0_1020s4() const {
559    if (Kind != k_Immediate)
560      return false;
561    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
562    if (!CE) return false;
563    int64_t Value = CE->getValue();
564    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
565  }
566  bool isImm0_508s4() const {
567    if (Kind != k_Immediate)
568      return false;
569    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
570    if (!CE) return false;
571    int64_t Value = CE->getValue();
572    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
573  }
574  bool isImm0_255() const {
575    if (Kind != k_Immediate)
576      return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return Value >= 0 && Value < 256;
581  }
582  bool isImm0_1() const {
583    if (Kind != k_Immediate)
584      return false;
585    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
586    if (!CE) return false;
587    int64_t Value = CE->getValue();
588    return Value >= 0 && Value < 2;
589  }
590  bool isImm0_3() const {
591    if (Kind != k_Immediate)
592      return false;
593    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
594    if (!CE) return false;
595    int64_t Value = CE->getValue();
596    return Value >= 0 && Value < 4;
597  }
598  bool isImm0_7() const {
599    if (Kind != k_Immediate)
600      return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 8;
605  }
606  bool isImm0_15() const {
607    if (Kind != k_Immediate)
608      return false;
609    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
610    if (!CE) return false;
611    int64_t Value = CE->getValue();
612    return Value >= 0 && Value < 16;
613  }
614  bool isImm0_31() const {
615    if (Kind != k_Immediate)
616      return false;
617    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
618    if (!CE) return false;
619    int64_t Value = CE->getValue();
620    return Value >= 0 && Value < 32;
621  }
622  bool isImm0_63() const {
623    if (Kind != k_Immediate)
624      return false;
625    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
626    if (!CE) return false;
627    int64_t Value = CE->getValue();
628    return Value >= 0 && Value < 64;
629  }
630  bool isImm8() const {
631    if (Kind != k_Immediate)
632      return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 8;
637  }
638  bool isImm16() const {
639    if (Kind != k_Immediate)
640      return false;
641    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
642    if (!CE) return false;
643    int64_t Value = CE->getValue();
644    return Value == 16;
645  }
646  bool isImm32() const {
647    if (Kind != k_Immediate)
648      return false;
649    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
650    if (!CE) return false;
651    int64_t Value = CE->getValue();
652    return Value == 32;
653  }
654  bool isShrImm8() const {
655    if (Kind != k_Immediate)
656      return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 8;
661  }
662  bool isShrImm16() const {
663    if (Kind != k_Immediate)
664      return false;
665    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
666    if (!CE) return false;
667    int64_t Value = CE->getValue();
668    return Value > 0 && Value <= 16;
669  }
670  bool isShrImm32() const {
671    if (Kind != k_Immediate)
672      return false;
673    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
674    if (!CE) return false;
675    int64_t Value = CE->getValue();
676    return Value > 0 && Value <= 32;
677  }
678  bool isShrImm64() const {
679    if (Kind != k_Immediate)
680      return false;
681    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
682    if (!CE) return false;
683    int64_t Value = CE->getValue();
684    return Value > 0 && Value <= 64;
685  }
686  bool isImm1_7() const {
687    if (Kind != k_Immediate)
688      return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value > 0 && Value < 8;
693  }
694  bool isImm1_15() const {
695    if (Kind != k_Immediate)
696      return false;
697    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
698    if (!CE) return false;
699    int64_t Value = CE->getValue();
700    return Value > 0 && Value < 16;
701  }
702  bool isImm1_31() const {
703    if (Kind != k_Immediate)
704      return false;
705    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
706    if (!CE) return false;
707    int64_t Value = CE->getValue();
708    return Value > 0 && Value < 32;
709  }
710  bool isImm1_16() const {
711    if (Kind != k_Immediate)
712      return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value > 0 && Value < 17;
717  }
718  bool isImm1_32() const {
719    if (Kind != k_Immediate)
720      return false;
721    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
722    if (!CE) return false;
723    int64_t Value = CE->getValue();
724    return Value > 0 && Value < 33;
725  }
726  bool isImm0_32() const {
727    if (Kind != k_Immediate)
728      return false;
729    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
730    if (!CE) return false;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value < 33;
733  }
734  bool isImm0_65535() const {
735    if (Kind != k_Immediate)
736      return false;
737    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
738    if (!CE) return false;
739    int64_t Value = CE->getValue();
740    return Value >= 0 && Value < 65536;
741  }
742  bool isImm0_65535Expr() const {
743    if (Kind != k_Immediate)
744      return false;
745    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
746    // If it's not a constant expression, it'll generate a fixup and be
747    // handled later.
748    if (!CE) return true;
749    int64_t Value = CE->getValue();
750    return Value >= 0 && Value < 65536;
751  }
752  bool isImm24bit() const {
753    if (Kind != k_Immediate)
754      return false;
755    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
756    if (!CE) return false;
757    int64_t Value = CE->getValue();
758    return Value >= 0 && Value <= 0xffffff;
759  }
760  bool isImmThumbSR() const {
761    if (Kind != k_Immediate)
762      return false;
763    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764    if (!CE) return false;
765    int64_t Value = CE->getValue();
766    return Value > 0 && Value < 33;
767  }
768  bool isPKHLSLImm() const {
769    if (Kind != k_Immediate)
770      return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return Value >= 0 && Value < 32;
775  }
776  bool isPKHASRImm() const {
777    if (Kind != k_Immediate)
778      return false;
779    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
780    if (!CE) return false;
781    int64_t Value = CE->getValue();
782    return Value > 0 && Value <= 32;
783  }
784  bool isARMSOImm() const {
785    if (Kind != k_Immediate)
786      return false;
787    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
788    if (!CE) return false;
789    int64_t Value = CE->getValue();
790    return ARM_AM::getSOImmVal(Value) != -1;
791  }
792  bool isARMSOImmNot() const {
793    if (Kind != k_Immediate)
794      return false;
795    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
796    if (!CE) return false;
797    int64_t Value = CE->getValue();
798    return ARM_AM::getSOImmVal(~Value) != -1;
799  }
800  bool isARMSOImmNeg() const {
801    if (Kind != k_Immediate)
802      return false;
803    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
804    if (!CE) return false;
805    int64_t Value = CE->getValue();
806    return ARM_AM::getSOImmVal(-Value) != -1;
807  }
808  bool isT2SOImm() const {
809    if (Kind != k_Immediate)
810      return false;
811    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
812    if (!CE) return false;
813    int64_t Value = CE->getValue();
814    return ARM_AM::getT2SOImmVal(Value) != -1;
815  }
816  bool isT2SOImmNot() const {
817    if (Kind != k_Immediate)
818      return false;
819    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
820    if (!CE) return false;
821    int64_t Value = CE->getValue();
822    return ARM_AM::getT2SOImmVal(~Value) != -1;
823  }
824  bool isT2SOImmNeg() const {
825    if (Kind != k_Immediate)
826      return false;
827    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828    if (!CE) return false;
829    int64_t Value = CE->getValue();
830    return ARM_AM::getT2SOImmVal(-Value) != -1;
831  }
832  bool isSetEndImm() const {
833    if (Kind != k_Immediate)
834      return false;
835    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836    if (!CE) return false;
837    int64_t Value = CE->getValue();
838    return Value == 1 || Value == 0;
839  }
840  bool isReg() const { return Kind == k_Register; }
841  bool isRegList() const { return Kind == k_RegisterList; }
842  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
843  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
844  bool isToken() const { return Kind == k_Token; }
845  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
846  bool isMemory() const { return Kind == k_Memory; }
847  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
848  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
849  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
850  bool isRotImm() const { return Kind == k_RotateImmediate; }
851  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
852  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
853  bool isPostIdxReg() const {
854    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
855  }
856  bool isMemNoOffset(bool alignOK = false) const {
857    if (!isMemory())
858      return false;
859    // No offset of any kind.
860    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
861     (alignOK || Memory.Alignment == 0);
862  }
863  bool isAlignedMemory() const {
864    return isMemNoOffset(true);
865  }
866  bool isAddrMode2() const {
867    if (!isMemory() || Memory.Alignment != 0) return false;
868    // Check for register offset.
869    if (Memory.OffsetRegNum) return true;
870    // Immediate offset in range [-4095, 4095].
871    if (!Memory.OffsetImm) return true;
872    int64_t Val = Memory.OffsetImm->getValue();
873    return Val > -4096 && Val < 4096;
874  }
875  bool isAM2OffsetImm() const {
876    if (Kind != k_Immediate)
877      return false;
878    // Immediate offset in range [-4095, 4095].
879    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
880    if (!CE) return false;
881    int64_t Val = CE->getValue();
882    return Val > -4096 && Val < 4096;
883  }
884  bool isAddrMode3() const {
885    // If we have an immediate that's not a constant, treat it as a label
886    // reference needing a fixup. If it is a constant, it's something else
887    // and we reject it.
888    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
889      return true;
890    if (!isMemory() || Memory.Alignment != 0) return false;
891    // No shifts are legal for AM3.
892    if (Memory.ShiftType != ARM_AM::no_shift) return false;
893    // Check for register offset.
894    if (Memory.OffsetRegNum) return true;
895    // Immediate offset in range [-255, 255].
896    if (!Memory.OffsetImm) return true;
897    int64_t Val = Memory.OffsetImm->getValue();
898    return Val > -256 && Val < 256;
899  }
900  bool isAM3Offset() const {
901    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
902      return false;
903    if (Kind == k_PostIndexRegister)
904      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
905    // Immediate offset in range [-255, 255].
906    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
907    if (!CE) return false;
908    int64_t Val = CE->getValue();
909    // Special case, #-0 is INT32_MIN.
910    return (Val > -256 && Val < 256) || Val == INT32_MIN;
911  }
912  bool isAddrMode5() const {
913    // If we have an immediate that's not a constant, treat it as a label
914    // reference needing a fixup. If it is a constant, it's something else
915    // and we reject it.
916    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
917      return true;
918    if (!isMemory() || Memory.Alignment != 0) return false;
919    // Check for register offset.
920    if (Memory.OffsetRegNum) return false;
921    // Immediate offset in range [-1020, 1020] and a multiple of 4.
922    if (!Memory.OffsetImm) return true;
923    int64_t Val = Memory.OffsetImm->getValue();
924    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
925      Val == INT32_MIN;
926  }
927  bool isMemTBB() const {
928    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
929        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
930      return false;
931    return true;
932  }
933  bool isMemTBH() const {
934    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
935        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
936        Memory.Alignment != 0 )
937      return false;
938    return true;
939  }
940  bool isMemRegOffset() const {
941    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
942      return false;
943    return true;
944  }
945  bool isT2MemRegOffset() const {
946    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
947        Memory.Alignment != 0)
948      return false;
949    // Only lsl #{0, 1, 2, 3} allowed.
950    if (Memory.ShiftType == ARM_AM::no_shift)
951      return true;
952    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
953      return false;
954    return true;
955  }
956  bool isMemThumbRR() const {
957    // Thumb reg+reg addressing is simple. Just two registers, a base and
958    // an offset. No shifts, negations or any other complicating factors.
959    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
960        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
961      return false;
962    return isARMLowRegister(Memory.BaseRegNum) &&
963      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
964  }
965  bool isMemThumbRIs4() const {
966    if (!isMemory() || Memory.OffsetRegNum != 0 ||
967        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
968      return false;
969    // Immediate offset, multiple of 4 in range [0, 124].
970    if (!Memory.OffsetImm) return true;
971    int64_t Val = Memory.OffsetImm->getValue();
972    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
973  }
974  bool isMemThumbRIs2() const {
975    if (!isMemory() || Memory.OffsetRegNum != 0 ||
976        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
977      return false;
978    // Immediate offset, multiple of 4 in range [0, 62].
979    if (!Memory.OffsetImm) return true;
980    int64_t Val = Memory.OffsetImm->getValue();
981    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
982  }
983  bool isMemThumbRIs1() const {
984    if (!isMemory() || Memory.OffsetRegNum != 0 ||
985        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
986      return false;
987    // Immediate offset in range [0, 31].
988    if (!Memory.OffsetImm) return true;
989    int64_t Val = Memory.OffsetImm->getValue();
990    return Val >= 0 && Val <= 31;
991  }
992  bool isMemThumbSPI() const {
993    if (!isMemory() || Memory.OffsetRegNum != 0 ||
994        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
995      return false;
996    // Immediate offset, multiple of 4 in range [0, 1020].
997    if (!Memory.OffsetImm) return true;
998    int64_t Val = Memory.OffsetImm->getValue();
999    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1000  }
1001  bool isMemImm8s4Offset() const {
1002    // If we have an immediate that's not a constant, treat it as a label
1003    // reference needing a fixup. If it is a constant, it's something else
1004    // and we reject it.
1005    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1006      return true;
1007    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1008      return false;
1009    // Immediate offset a multiple of 4 in range [-1020, 1020].
1010    if (!Memory.OffsetImm) return true;
1011    int64_t Val = Memory.OffsetImm->getValue();
1012    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1013  }
1014  bool isMemImm0_1020s4Offset() const {
1015    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1016      return false;
1017    // Immediate offset a multiple of 4 in range [0, 1020].
1018    if (!Memory.OffsetImm) return true;
1019    int64_t Val = Memory.OffsetImm->getValue();
1020    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1021  }
1022  bool isMemImm8Offset() const {
1023    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1024      return false;
1025    // Immediate offset in range [-255, 255].
1026    if (!Memory.OffsetImm) return true;
1027    int64_t Val = Memory.OffsetImm->getValue();
1028    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1029  }
1030  bool isMemPosImm8Offset() const {
1031    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1032      return false;
1033    // Immediate offset in range [0, 255].
1034    if (!Memory.OffsetImm) return true;
1035    int64_t Val = Memory.OffsetImm->getValue();
1036    return Val >= 0 && Val < 256;
1037  }
1038  bool isMemNegImm8Offset() const {
1039    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1040      return false;
1041    // Immediate offset in range [-255, -1].
1042    if (!Memory.OffsetImm) return false;
1043    int64_t Val = Memory.OffsetImm->getValue();
1044    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1045  }
1046  bool isMemUImm12Offset() const {
1047    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1048      return false;
1049    // Immediate offset in range [0, 4095].
1050    if (!Memory.OffsetImm) return true;
1051    int64_t Val = Memory.OffsetImm->getValue();
1052    return (Val >= 0 && Val < 4096);
1053  }
1054  bool isMemImm12Offset() const {
1055    // If we have an immediate that's not a constant, treat it as a label
1056    // reference needing a fixup. If it is a constant, it's something else
1057    // and we reject it.
1058    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1059      return true;
1060
1061    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1062      return false;
1063    // Immediate offset in range [-4095, 4095].
1064    if (!Memory.OffsetImm) return true;
1065    int64_t Val = Memory.OffsetImm->getValue();
1066    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1067  }
1068  bool isPostIdxImm8() const {
1069    if (Kind != k_Immediate)
1070      return false;
1071    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1072    if (!CE) return false;
1073    int64_t Val = CE->getValue();
1074    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1075  }
1076  bool isPostIdxImm8s4() const {
1077    if (Kind != k_Immediate)
1078      return false;
1079    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1080    if (!CE) return false;
1081    int64_t Val = CE->getValue();
1082    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1083      (Val == INT32_MIN);
1084  }
1085
1086  bool isMSRMask() const { return Kind == k_MSRMask; }
1087  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1088
1089  // NEON operands.
1090  bool isSingleSpacedVectorList() const {
1091    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1092  }
1093  bool isDoubleSpacedVectorList() const {
1094    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1095  }
1096  bool isVecListOneD() const {
1097    if (!isSingleSpacedVectorList()) return false;
1098    return VectorList.Count == 1;
1099  }
1100
1101  bool isVecListTwoD() const {
1102    if (!isSingleSpacedVectorList()) return false;
1103    return VectorList.Count == 2;
1104  }
1105
1106  bool isVecListThreeD() const {
1107    if (!isSingleSpacedVectorList()) return false;
1108    return VectorList.Count == 3;
1109  }
1110
1111  bool isVecListFourD() const {
1112    if (!isSingleSpacedVectorList()) return false;
1113    return VectorList.Count == 4;
1114  }
1115
1116  bool isVecListTwoQ() const {
1117    if (!isDoubleSpacedVectorList()) return false;
1118    return VectorList.Count == 2;
1119  }
1120
1121  bool isSingleSpacedVectorAllLanes() const {
1122    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1123  }
1124  bool isDoubleSpacedVectorAllLanes() const {
1125    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1126  }
1127  bool isVecListOneDAllLanes() const {
1128    if (!isSingleSpacedVectorAllLanes()) return false;
1129    return VectorList.Count == 1;
1130  }
1131
1132  bool isVecListTwoDAllLanes() const {
1133    if (!isSingleSpacedVectorAllLanes()) return false;
1134    return VectorList.Count == 2;
1135  }
1136
1137  bool isVecListTwoQAllLanes() const {
1138    if (!isDoubleSpacedVectorAllLanes()) return false;
1139    return VectorList.Count == 2;
1140  }
1141
1142  bool isSingleSpacedVectorIndexed() const {
1143    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1144  }
1145  bool isDoubleSpacedVectorIndexed() const {
1146    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1147  }
1148  bool isVecListOneDByteIndexed() const {
1149    if (!isSingleSpacedVectorIndexed()) return false;
1150    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1151  }
1152
1153  bool isVecListOneDHWordIndexed() const {
1154    if (!isSingleSpacedVectorIndexed()) return false;
1155    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1156  }
1157
1158  bool isVecListOneDWordIndexed() const {
1159    if (!isSingleSpacedVectorIndexed()) return false;
1160    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1161  }
1162
1163  bool isVecListTwoDByteIndexed() const {
1164    if (!isSingleSpacedVectorIndexed()) return false;
1165    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1166  }
1167
1168  bool isVecListTwoDHWordIndexed() const {
1169    if (!isSingleSpacedVectorIndexed()) return false;
1170    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1171  }
1172
1173  bool isVecListTwoQWordIndexed() const {
1174    if (!isDoubleSpacedVectorIndexed()) return false;
1175    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1176  }
1177
1178  bool isVecListTwoQHWordIndexed() const {
1179    if (!isDoubleSpacedVectorIndexed()) return false;
1180    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1181  }
1182
1183  bool isVecListTwoDWordIndexed() const {
1184    if (!isSingleSpacedVectorIndexed()) return false;
1185    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1186  }
1187
1188  bool isVectorIndex8() const {
1189    if (Kind != k_VectorIndex) return false;
1190    return VectorIndex.Val < 8;
1191  }
1192  bool isVectorIndex16() const {
1193    if (Kind != k_VectorIndex) return false;
1194    return VectorIndex.Val < 4;
1195  }
1196  bool isVectorIndex32() const {
1197    if (Kind != k_VectorIndex) return false;
1198    return VectorIndex.Val < 2;
1199  }
1200
1201  bool isNEONi8splat() const {
1202    if (Kind != k_Immediate)
1203      return false;
1204    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1205    // Must be a constant.
1206    if (!CE) return false;
1207    int64_t Value = CE->getValue();
1208    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1209    // value.
1210    return Value >= 0 && Value < 256;
1211  }
1212
1213  bool isNEONi16splat() const {
1214    if (Kind != k_Immediate)
1215      return false;
1216    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1217    // Must be a constant.
1218    if (!CE) return false;
1219    int64_t Value = CE->getValue();
1220    // i16 value in the range [0,255] or [0x0100, 0xff00]
1221    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1222  }
1223
1224  bool isNEONi32splat() const {
1225    if (Kind != k_Immediate)
1226      return false;
1227    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1228    // Must be a constant.
1229    if (!CE) return false;
1230    int64_t Value = CE->getValue();
1231    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1232    return (Value >= 0 && Value < 256) ||
1233      (Value >= 0x0100 && Value <= 0xff00) ||
1234      (Value >= 0x010000 && Value <= 0xff0000) ||
1235      (Value >= 0x01000000 && Value <= 0xff000000);
1236  }
1237
1238  bool isNEONi32vmov() const {
1239    if (Kind != k_Immediate)
1240      return false;
1241    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1242    // Must be a constant.
1243    if (!CE) return false;
1244    int64_t Value = CE->getValue();
1245    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1246    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1247    return (Value >= 0 && Value < 256) ||
1248      (Value >= 0x0100 && Value <= 0xff00) ||
1249      (Value >= 0x010000 && Value <= 0xff0000) ||
1250      (Value >= 0x01000000 && Value <= 0xff000000) ||
1251      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1252      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1253  }
1254  bool isNEONi32vmovNeg() const {
1255    if (Kind != k_Immediate)
1256      return false;
1257    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1258    // Must be a constant.
1259    if (!CE) return false;
1260    int64_t Value = ~CE->getValue();
1261    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1262    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1263    return (Value >= 0 && Value < 256) ||
1264      (Value >= 0x0100 && Value <= 0xff00) ||
1265      (Value >= 0x010000 && Value <= 0xff0000) ||
1266      (Value >= 0x01000000 && Value <= 0xff000000) ||
1267      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1268      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1269  }
1270
1271  bool isNEONi64splat() const {
1272    if (Kind != k_Immediate)
1273      return false;
1274    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1275    // Must be a constant.
1276    if (!CE) return false;
1277    uint64_t Value = CE->getValue();
1278    // i64 value with each byte being either 0 or 0xff.
1279    for (unsigned i = 0; i < 8; ++i)
1280      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1281    return true;
1282  }
1283
1284  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1285    // Add as immediates when possible.  Null MCExpr = 0.
1286    if (Expr == 0)
1287      Inst.addOperand(MCOperand::CreateImm(0));
1288    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1289      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1290    else
1291      Inst.addOperand(MCOperand::CreateExpr(Expr));
1292  }
1293
1294  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1295    assert(N == 2 && "Invalid number of operands!");
1296    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1297    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1298    Inst.addOperand(MCOperand::CreateReg(RegNum));
1299  }
1300
1301  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1302    assert(N == 1 && "Invalid number of operands!");
1303    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1304  }
1305
1306  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1307    assert(N == 1 && "Invalid number of operands!");
1308    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1309  }
1310
1311  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1312    assert(N == 1 && "Invalid number of operands!");
1313    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1314  }
1315
1316  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1317    assert(N == 1 && "Invalid number of operands!");
1318    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1319  }
1320
1321  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1322    assert(N == 1 && "Invalid number of operands!");
1323    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1324  }
1325
1326  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1327    assert(N == 1 && "Invalid number of operands!");
1328    Inst.addOperand(MCOperand::CreateReg(getReg()));
1329  }
1330
1331  void addRegOperands(MCInst &Inst, unsigned N) const {
1332    assert(N == 1 && "Invalid number of operands!");
1333    Inst.addOperand(MCOperand::CreateReg(getReg()));
1334  }
1335
1336  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1337    assert(N == 3 && "Invalid number of operands!");
1338    assert(isRegShiftedReg() &&
1339           "addRegShiftedRegOperands() on non RegShiftedReg!");
1340    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1341    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1342    Inst.addOperand(MCOperand::CreateImm(
1343      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1344  }
1345
1346  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1347    assert(N == 2 && "Invalid number of operands!");
1348    assert(isRegShiftedImm() &&
1349           "addRegShiftedImmOperands() on non RegShiftedImm!");
1350    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1351    Inst.addOperand(MCOperand::CreateImm(
1352      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1353  }
1354
1355  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1356    assert(N == 1 && "Invalid number of operands!");
1357    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1358                                         ShifterImm.Imm));
1359  }
1360
1361  void addRegListOperands(MCInst &Inst, unsigned N) const {
1362    assert(N == 1 && "Invalid number of operands!");
1363    const SmallVectorImpl<unsigned> &RegList = getRegList();
1364    for (SmallVectorImpl<unsigned>::const_iterator
1365           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1366      Inst.addOperand(MCOperand::CreateReg(*I));
1367  }
1368
1369  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1370    addRegListOperands(Inst, N);
1371  }
1372
1373  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1374    addRegListOperands(Inst, N);
1375  }
1376
1377  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1378    assert(N == 1 && "Invalid number of operands!");
1379    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1380    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1381  }
1382
1383  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1384    assert(N == 1 && "Invalid number of operands!");
1385    // Munge the lsb/width into a bitfield mask.
1386    unsigned lsb = Bitfield.LSB;
1387    unsigned width = Bitfield.Width;
1388    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1389    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1390                      (32 - (lsb + width)));
1391    Inst.addOperand(MCOperand::CreateImm(Mask));
1392  }
1393
1394  void addImmOperands(MCInst &Inst, unsigned N) const {
1395    assert(N == 1 && "Invalid number of operands!");
1396    addExpr(Inst, getImm());
1397  }
1398
1399  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1400    assert(N == 1 && "Invalid number of operands!");
1401    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1402  }
1403
1404  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1405    assert(N == 1 && "Invalid number of operands!");
1406    // FIXME: We really want to scale the value here, but the LDRD/STRD
1407    // instruction don't encode operands that way yet.
1408    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1409    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1410  }
1411
1412  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1413    assert(N == 1 && "Invalid number of operands!");
1414    // The immediate is scaled by four in the encoding and is stored
1415    // in the MCInst as such. Lop off the low two bits here.
1416    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1417    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1418  }
1419
1420  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1421    assert(N == 1 && "Invalid number of operands!");
1422    // The immediate is scaled by four in the encoding and is stored
1423    // in the MCInst as such. Lop off the low two bits here.
1424    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1425    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1426  }
1427
1428  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1429    assert(N == 1 && "Invalid number of operands!");
1430    // The constant encodes as the immediate-1, and we store in the instruction
1431    // the bits as encoded, so subtract off one here.
1432    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1433    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1434  }
1435
1436  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1437    assert(N == 1 && "Invalid number of operands!");
1438    // The constant encodes as the immediate-1, and we store in the instruction
1439    // the bits as encoded, so subtract off one here.
1440    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1441    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1442  }
1443
1444  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1445    assert(N == 1 && "Invalid number of operands!");
1446    // The constant encodes as the immediate, except for 32, which encodes as
1447    // zero.
1448    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1449    unsigned Imm = CE->getValue();
1450    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1451  }
1452
1453  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1454    assert(N == 1 && "Invalid number of operands!");
1455    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1456    // the instruction as well.
1457    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1458    int Val = CE->getValue();
1459    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1460  }
1461
1462  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1463    assert(N == 1 && "Invalid number of operands!");
1464    // The operand is actually a t2_so_imm, but we have its bitwise
1465    // negation in the assembly source, so twiddle it here.
1466    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1467    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1468  }
1469
1470  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1471    assert(N == 1 && "Invalid number of operands!");
1472    // The operand is actually a t2_so_imm, but we have its
1473    // negation in the assembly source, so twiddle it here.
1474    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1475    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1476  }
1477
1478  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1479    assert(N == 1 && "Invalid number of operands!");
1480    // The operand is actually a so_imm, but we have its bitwise
1481    // negation in the assembly source, so twiddle it here.
1482    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1483    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1484  }
1485
1486  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1487    assert(N == 1 && "Invalid number of operands!");
1488    // The operand is actually a so_imm, but we have its
1489    // negation in the assembly source, so twiddle it here.
1490    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1491    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1492  }
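
  // Illustrative note for the four Not/Neg adders above: the value V written
  // in the assembly source is stored already twiddled, i.e. as ~V for the
  // *Not forms and as -V for the *Neg forms, so the encoder sees the
  // immediate the instruction actually encodes.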
1493
1494  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1495    assert(N == 1 && "Invalid number of operands!");
1496    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1497  }
1498
1499  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1500    assert(N == 1 && "Invalid number of operands!");
1501    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1502  }
1503
1504  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1505    assert(N == 2 && "Invalid number of operands!");
1506    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1507    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1508  }
1509
1510  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1511    assert(N == 3 && "Invalid number of operands!");
1512    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1513    if (!Memory.OffsetRegNum) {
1514      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1515      // Special case for #-0
1516      if (Val == INT32_MIN) Val = 0;
1517      if (Val < 0) Val = -Val;
1518      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1519    } else {
1520      // For register offset, we encode the shift type and negation flag
1521      // here.
1522      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1523                              Memory.ShiftImm, Memory.ShiftType);
1524    }
1525    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1526    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1527    Inst.addOperand(MCOperand::CreateImm(Val));
1528  }
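
  // A worked example for addAddrMode2Operands (illustrative only):
  //   [r1, #-8]        -> base r1, offset reg 0,
  //                       imm = getAM2Opc(sub, 8, no_shift)
  //   [r1, r2, lsl #2] -> base r1, offset reg r2,
  //                       imm = getAM2Opc(add, 2, lsl)
  // INT32_MIN appears to be the parser's sentinel for "#-0" and ends up
  // encoded as a subtracted zero.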
1529
1530  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1531    assert(N == 2 && "Invalid number of operands!");
1532    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1533    assert(CE && "non-constant AM2OffsetImm operand!");
1534    int32_t Val = CE->getValue();
1535    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1536    // Special case for #-0
1537    if (Val == INT32_MIN) Val = 0;
1538    if (Val < 0) Val = -Val;
1539    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1540    Inst.addOperand(MCOperand::CreateReg(0));
1541    Inst.addOperand(MCOperand::CreateImm(Val));
1542  }
1543
1544  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1545    assert(N == 3 && "Invalid number of operands!");
1546    // If we have an immediate that's not a constant, treat it as a label
1547    // reference needing a fixup. If it is a constant, it's something else
1548    // and we reject it.
1549    if (isImm()) {
1550      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1551      Inst.addOperand(MCOperand::CreateReg(0));
1552      Inst.addOperand(MCOperand::CreateImm(0));
1553      return;
1554    }
1555
1556    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1557    if (!Memory.OffsetRegNum) {
1558      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1559      // Special case for #-0
1560      if (Val == INT32_MIN) Val = 0;
1561      if (Val < 0) Val = -Val;
1562      Val = ARM_AM::getAM3Opc(AddSub, Val);
1563    } else {
1564      // For register offset, we encode only the add/sub (negation) flag here;
1565      // addrmode3 has no shift amount.
1566      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1567    }
1568    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1569    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1570    Inst.addOperand(MCOperand::CreateImm(Val));
1571  }
1572
1573  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1574    assert(N == 2 && "Invalid number of operands!");
1575    if (Kind == k_PostIndexRegister) {
1576      int32_t Val =
1577        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1578      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1579      Inst.addOperand(MCOperand::CreateImm(Val));
1580      return;
1581    }
1582
1583    // Constant offset.
1584    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1585    int32_t Val = CE->getValue();
1586    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1587    // Special case for #-0
1588    if (Val == INT32_MIN) Val = 0;
1589    if (Val < 0) Val = -Val;
1590    Val = ARM_AM::getAM3Opc(AddSub, Val);
1591    Inst.addOperand(MCOperand::CreateReg(0));
1592    Inst.addOperand(MCOperand::CreateImm(Val));
1593  }
1594
1595  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1596    assert(N == 2 && "Invalid number of operands!");
1597    // If we have an immediate that's not a constant, treat it as a label
1598    // reference needing a fixup. If it is a constant, it's something else
1599    // and we reject it.
1600    if (isImm()) {
1601      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1602      Inst.addOperand(MCOperand::CreateImm(0));
1603      return;
1604    }
1605
1606    // The lower two bits are always zero and as such are not encoded.
1607    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1608    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1609    // Special case for #-0
1610    if (Val == INT32_MIN) Val = 0;
1611    if (Val < 0) Val = -Val;
1612    Val = ARM_AM::getAM5Opc(AddSub, Val);
1613    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1614    Inst.addOperand(MCOperand::CreateImm(Val));
1615  }
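
  // A worked example for addAddrMode5Operands (illustrative only): a
  // VFP-style offset such as #-16 is scaled down to -4, split into (sub, 4),
  // and packed with getAM5Opc. A non-constant immediate instead goes out as
  // an expression plus a zero placeholder so a later fixup can resolve it.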
1616
1617  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1618    assert(N == 2 && "Invalid number of operands!");
1619    // If we have an immediate that's not a constant, treat it as a label
1620    // reference needing a fixup. If it is a constant, it's something else
1621    // and we reject it.
1622    if (isImm()) {
1623      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1624      Inst.addOperand(MCOperand::CreateImm(0));
1625      return;
1626    }
1627
1628    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1629    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1630    Inst.addOperand(MCOperand::CreateImm(Val));
1631  }
1632
1633  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1634    assert(N == 2 && "Invalid number of operands!");
1635    // The lower two bits are always zero and as such are not encoded.
1636    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1637    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1638    Inst.addOperand(MCOperand::CreateImm(Val));
1639  }
1640
1641  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1642    assert(N == 2 && "Invalid number of operands!");
1643    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1644    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1645    Inst.addOperand(MCOperand::CreateImm(Val));
1646  }
1647
1648  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1649    addMemImm8OffsetOperands(Inst, N);
1650  }
1651
1652  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1653    addMemImm8OffsetOperands(Inst, N);
1654  }
1655
1656  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1657    assert(N == 2 && "Invalid number of operands!");
1658    // If this is an immediate, it's a label reference.
1659    if (Kind == k_Immediate) {
1660      addExpr(Inst, getImm());
1661      Inst.addOperand(MCOperand::CreateImm(0));
1662      return;
1663    }
1664
1665    // Otherwise, it's a normal memory reg+offset.
1666    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1667    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1668    Inst.addOperand(MCOperand::CreateImm(Val));
1669  }
1670
1671  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1672    assert(N == 2 && "Invalid number of operands!");
1673    // If this is an immediate, it's a label reference.
1674    if (Kind == k_Immediate) {
1675      addExpr(Inst, getImm());
1676      Inst.addOperand(MCOperand::CreateImm(0));
1677      return;
1678    }
1679
1680    // Otherwise, it's a normal memory reg+offset.
1681    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1682    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1683    Inst.addOperand(MCOperand::CreateImm(Val));
1684  }
1685
1686  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1687    assert(N == 2 && "Invalid number of operands!");
1688    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1689    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1690  }
1691
1692  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1693    assert(N == 2 && "Invalid number of operands!");
1694    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1695    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1696  }
1697
1698  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1699    assert(N == 3 && "Invalid number of operands!");
1700    unsigned Val =
1701      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1702                        Memory.ShiftImm, Memory.ShiftType);
1703    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1704    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1705    Inst.addOperand(MCOperand::CreateImm(Val));
1706  }
1707
1708  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1709    assert(N == 3 && "Invalid number of operands!");
1710    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1711    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1712    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1713  }
1714
1715  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1716    assert(N == 2 && "Invalid number of operands!");
1717    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1718    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1719  }
1720
1721  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1722    assert(N == 2 && "Invalid number of operands!");
1723    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1724    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1725    Inst.addOperand(MCOperand::CreateImm(Val));
1726  }
1727
1728  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1729    assert(N == 2 && "Invalid number of operands!");
1730    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1731    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1732    Inst.addOperand(MCOperand::CreateImm(Val));
1733  }
1734
1735  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1736    assert(N == 2 && "Invalid number of operands!");
1737    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1738    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1739    Inst.addOperand(MCOperand::CreateImm(Val));
1740  }
1741
1742  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1743    assert(N == 2 && "Invalid number of operands!");
1744    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1745    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1746    Inst.addOperand(MCOperand::CreateImm(Val));
1747  }
1748
1749  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1750    assert(N == 1 && "Invalid number of operands!");
1751    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1752    assert(CE && "non-constant post-idx-imm8 operand!");
1753    int Imm = CE->getValue();
1754    bool isAdd = Imm >= 0;
1755    if (Imm == INT32_MIN) Imm = 0;
1756    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1757    Inst.addOperand(MCOperand::CreateImm(Imm));
1758  }
1759
1760  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1761    assert(N == 1 && "Invalid number of operands!");
1762    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1763    assert(CE && "non-constant post-idx-imm8s4 operand!");
1764    int Imm = CE->getValue();
1765    bool isAdd = Imm >= 0;
1766    if (Imm == INT32_MIN) Imm = 0;
1767    // Immediate is scaled by 4.
1768    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1769    Inst.addOperand(MCOperand::CreateImm(Imm));
1770  }
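
  // Illustrative traces for the two post-index immediate adders above; the
  // add/sub flag lands in bit 8 of the stored value:
  //   addPostIdxImm8Operands:   #+4 -> 0x104,  #-4 -> 0x004
  //   addPostIdxImm8s4Operands: #+8 -> 0x102,  #-8 -> 0x002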
1771
1772  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1773    assert(N == 2 && "Invalid number of operands!");
1774    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1775    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1776  }
1777
1778  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1779    assert(N == 2 && "Invalid number of operands!");
1780    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1781    // The sign, shift type, and shift amount are encoded in a single operand
1782    // using the AM2 encoding helpers.
1783    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1784    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1785                                     PostIdxReg.ShiftTy);
1786    Inst.addOperand(MCOperand::CreateImm(Imm));
1787  }
1788
1789  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1790    assert(N == 1 && "Invalid number of operands!");
1791    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1792  }
1793
1794  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 1 && "Invalid number of operands!");
1796    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1797  }
1798
1799  void addVecListOperands(MCInst &Inst, unsigned N) const {
1800    assert(N == 1 && "Invalid number of operands!");
1801    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1802  }
1803
1804  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1805    assert(N == 2 && "Invalid number of operands!");
1806    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1807    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1808  }
1809
1810  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1811    assert(N == 1 && "Invalid number of operands!");
1812    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1813  }
1814
1815  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1816    assert(N == 1 && "Invalid number of operands!");
1817    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1818  }
1819
1820  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1821    assert(N == 1 && "Invalid number of operands!");
1822    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1823  }
1824
1825  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1826    assert(N == 1 && "Invalid number of operands!");
1827    // The immediate encodes the type of constant as well as the value.
1828    // Mask in the bits that mark this as an i8 splat.
1829    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1830    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1831  }
1832
1833  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1834    assert(N == 1 && "Invalid number of operands!");
1835    // The immediate encodes the type of constant as well as the value.
1836    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1837    unsigned Value = CE->getValue();
1838    if (Value >= 256)
1839      Value = (Value >> 8) | 0xa00;
1840    else
1841      Value |= 0x800;
1842    Inst.addOperand(MCOperand::CreateImm(Value));
1843  }
1844
1845  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1846    assert(N == 1 && "Invalid number of operands!");
1847    // The immediate encodes the type of constant as well as the value.
1848    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1849    unsigned Value = CE->getValue();
1850    if (Value >= 256 && Value <= 0xff00)
1851      Value = (Value >> 8) | 0x200;
1852    else if (Value > 0xffff && Value <= 0xff0000)
1853      Value = (Value >> 16) | 0x400;
1854    else if (Value > 0xffffff)
1855      Value = (Value >> 24) | 0x600;
1856    Inst.addOperand(MCOperand::CreateImm(Value));
1857  }
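
  // Illustrative traces of the splat encodings above (payload | marker bits;
  // the marker records which byte of the element the payload occupies):
  //   i16, 0x12     -> 0x12 | 0x800 = 0x812
  //   i16, 0x1200   -> 0x12 | 0xa00 = 0xa12
  //   i32, 0x120000 -> 0x12 | 0x400 = 0x412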
1858
1859  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1860    assert(N == 1 && "Invalid number of operands!");
1861    // The immediate encodes the type of constant as well as the value.
1862    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1863    unsigned Value = CE->getValue();
1864    if (Value >= 256 && Value <= 0xffff)
1865      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1866    else if (Value > 0xffff && Value <= 0xffffff)
1867      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1868    else if (Value > 0xffffff)
1869      Value = (Value >> 24) | 0x600;
1870    Inst.addOperand(MCOperand::CreateImm(Value));
1871  }
1872
1873  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1874    assert(N == 1 && "Invalid number of operands!");
1875    // The immediate encodes the type of constant as well as the value.
1876    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1877    unsigned Value = ~CE->getValue();
1878    if (Value >= 256 && Value <= 0xffff)
1879      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1880    else if (Value > 0xffff && Value <= 0xffffff)
1881      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1882    else if (Value > 0xffffff)
1883      Value = (Value >> 24) | 0x600;
1884    Inst.addOperand(MCOperand::CreateImm(Value));
1885  }
1886
1887  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1888    assert(N == 1 && "Invalid number of operands!");
1889    // The immediate encodes the type of constant as well as the value.
1890    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1891    uint64_t Value = CE->getValue();
1892    unsigned Imm = 0;
1893    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1894      Imm |= (Value & 1) << i;
1895    }
1896    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1897  }
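
  // Illustrative trace for addNEONi64splatOperands: each byte of the value
  // is expected (by the matching predicate) to be 0x00 or 0xff, and the loop
  // keeps one bit per byte, e.g.
  //   0xff00ff00ff00ff00 -> Imm = 0b10101010 = 0xaa -> stored as 0x1eaa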
1898
1899  virtual void print(raw_ostream &OS) const;
1900
1901  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1902    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1903    Op->ITMask.Mask = Mask;
1904    Op->StartLoc = S;
1905    Op->EndLoc = S;
1906    return Op;
1907  }
1908
1909  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1910    ARMOperand *Op = new ARMOperand(k_CondCode);
1911    Op->CC.Val = CC;
1912    Op->StartLoc = S;
1913    Op->EndLoc = S;
1914    return Op;
1915  }
1916
1917  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1918    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1919    Op->Cop.Val = CopVal;
1920    Op->StartLoc = S;
1921    Op->EndLoc = S;
1922    return Op;
1923  }
1924
1925  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1926    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1927    Op->Cop.Val = CopVal;
1928    Op->StartLoc = S;
1929    Op->EndLoc = S;
1930    return Op;
1931  }
1932
1933  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1934    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1935    Op->Cop.Val = Val;
1936    Op->StartLoc = S;
1937    Op->EndLoc = E;
1938    return Op;
1939  }
1940
1941  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1942    ARMOperand *Op = new ARMOperand(k_CCOut);
1943    Op->Reg.RegNum = RegNum;
1944    Op->StartLoc = S;
1945    Op->EndLoc = S;
1946    return Op;
1947  }
1948
1949  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1950    ARMOperand *Op = new ARMOperand(k_Token);
1951    Op->Tok.Data = Str.data();
1952    Op->Tok.Length = Str.size();
1953    Op->StartLoc = S;
1954    Op->EndLoc = S;
1955    return Op;
1956  }
1957
1958  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1959    ARMOperand *Op = new ARMOperand(k_Register);
1960    Op->Reg.RegNum = RegNum;
1961    Op->StartLoc = S;
1962    Op->EndLoc = E;
1963    return Op;
1964  }
1965
1966  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1967                                           unsigned SrcReg,
1968                                           unsigned ShiftReg,
1969                                           unsigned ShiftImm,
1970                                           SMLoc S, SMLoc E) {
1971    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1972    Op->RegShiftedReg.ShiftTy = ShTy;
1973    Op->RegShiftedReg.SrcReg = SrcReg;
1974    Op->RegShiftedReg.ShiftReg = ShiftReg;
1975    Op->RegShiftedReg.ShiftImm = ShiftImm;
1976    Op->StartLoc = S;
1977    Op->EndLoc = E;
1978    return Op;
1979  }
1980
1981  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1982                                            unsigned SrcReg,
1983                                            unsigned ShiftImm,
1984                                            SMLoc S, SMLoc E) {
1985    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1986    Op->RegShiftedImm.ShiftTy = ShTy;
1987    Op->RegShiftedImm.SrcReg = SrcReg;
1988    Op->RegShiftedImm.ShiftImm = ShiftImm;
1989    Op->StartLoc = S;
1990    Op->EndLoc = E;
1991    return Op;
1992  }
1993
1994  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1995                                   SMLoc S, SMLoc E) {
1996    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1997    Op->ShifterImm.isASR = isASR;
1998    Op->ShifterImm.Imm = Imm;
1999    Op->StartLoc = S;
2000    Op->EndLoc = E;
2001    return Op;
2002  }
2003
2004  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2005    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2006    Op->RotImm.Imm = Imm;
2007    Op->StartLoc = S;
2008    Op->EndLoc = E;
2009    return Op;
2010  }
2011
2012  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2013                                    SMLoc S, SMLoc E) {
2014    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2015    Op->Bitfield.LSB = LSB;
2016    Op->Bitfield.Width = Width;
2017    Op->StartLoc = S;
2018    Op->EndLoc = E;
2019    return Op;
2020  }
2021
2022  static ARMOperand *
2023  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2024                SMLoc StartLoc, SMLoc EndLoc) {
2025    KindTy Kind = k_RegisterList;
2026
2027    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2028      Kind = k_DPRRegisterList;
2029    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2030             contains(Regs.front().first))
2031      Kind = k_SPRRegisterList;
2032
2033    ARMOperand *Op = new ARMOperand(Kind);
2034    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2035           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2036      Op->Registers.push_back(I->first);
2037    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2038    Op->StartLoc = StartLoc;
2039    Op->EndLoc = EndLoc;
2040    return Op;
2041  }
2042
2043  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2044                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2045    ARMOperand *Op = new ARMOperand(k_VectorList);
2046    Op->VectorList.RegNum = RegNum;
2047    Op->VectorList.Count = Count;
2048    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2049    Op->StartLoc = S;
2050    Op->EndLoc = E;
2051    return Op;
2052  }
2053
2054  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2055                                              bool isDoubleSpaced,
2056                                              SMLoc S, SMLoc E) {
2057    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2058    Op->VectorList.RegNum = RegNum;
2059    Op->VectorList.Count = Count;
2060    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2061    Op->StartLoc = S;
2062    Op->EndLoc = E;
2063    return Op;
2064  }
2065
2066  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2067                                             unsigned Index,
2068                                             bool isDoubleSpaced,
2069                                             SMLoc S, SMLoc E) {
2070    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2071    Op->VectorList.RegNum = RegNum;
2072    Op->VectorList.Count = Count;
2073    Op->VectorList.LaneIndex = Index;
2074    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2075    Op->StartLoc = S;
2076    Op->EndLoc = E;
2077    return Op;
2078  }
2079
2080  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2081                                       MCContext &Ctx) {
2082    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2083    Op->VectorIndex.Val = Idx;
2084    Op->StartLoc = S;
2085    Op->EndLoc = E;
2086    return Op;
2087  }
2088
2089  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2090    ARMOperand *Op = new ARMOperand(k_Immediate);
2091    Op->Imm.Val = Val;
2092    Op->StartLoc = S;
2093    Op->EndLoc = E;
2094    return Op;
2095  }
2096
2097  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2098    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2099    Op->FPImm.Val = Val;
2100    Op->StartLoc = S;
2101    Op->EndLoc = S;
2102    return Op;
2103  }
2104
2105  static ARMOperand *CreateMem(unsigned BaseRegNum,
2106                               const MCConstantExpr *OffsetImm,
2107                               unsigned OffsetRegNum,
2108                               ARM_AM::ShiftOpc ShiftType,
2109                               unsigned ShiftImm,
2110                               unsigned Alignment,
2111                               bool isNegative,
2112                               SMLoc S, SMLoc E) {
2113    ARMOperand *Op = new ARMOperand(k_Memory);
2114    Op->Memory.BaseRegNum = BaseRegNum;
2115    Op->Memory.OffsetImm = OffsetImm;
2116    Op->Memory.OffsetRegNum = OffsetRegNum;
2117    Op->Memory.ShiftType = ShiftType;
2118    Op->Memory.ShiftImm = ShiftImm;
2119    Op->Memory.Alignment = Alignment;
2120    Op->Memory.isNegative = isNegative;
2121    Op->StartLoc = S;
2122    Op->EndLoc = E;
2123    return Op;
2124  }
2125
2126  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2127                                      ARM_AM::ShiftOpc ShiftTy,
2128                                      unsigned ShiftImm,
2129                                      SMLoc S, SMLoc E) {
2130    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2131    Op->PostIdxReg.RegNum = RegNum;
2132    Op->PostIdxReg.isAdd = isAdd;
2133    Op->PostIdxReg.ShiftTy = ShiftTy;
2134    Op->PostIdxReg.ShiftImm = ShiftImm;
2135    Op->StartLoc = S;
2136    Op->EndLoc = E;
2137    return Op;
2138  }
2139
2140  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2141    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2142    Op->MBOpt.Val = Opt;
2143    Op->StartLoc = S;
2144    Op->EndLoc = S;
2145    return Op;
2146  }
2147
2148  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2149    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2150    Op->IFlags.Val = IFlags;
2151    Op->StartLoc = S;
2152    Op->EndLoc = S;
2153    return Op;
2154  }
2155
2156  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2157    ARMOperand *Op = new ARMOperand(k_MSRMask);
2158    Op->MMask.Val = MMask;
2159    Op->StartLoc = S;
2160    Op->EndLoc = S;
2161    return Op;
2162  }
2163};
2164
2165} // end anonymous namespace.
2166
2167void ARMOperand::print(raw_ostream &OS) const {
2168  switch (Kind) {
2169  case k_FPImmediate:
2170    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2171       << ") >";
2172    break;
2173  case k_CondCode:
2174    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2175    break;
2176  case k_CCOut:
2177    OS << "<ccout " << getReg() << ">";
2178    break;
2179  case k_ITCondMask: {
2180    static const char *MaskStr[] = {
2181      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2182      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2183    };
2184    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2185    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2186    break;
2187  }
2188  case k_CoprocNum:
2189    OS << "<coprocessor number: " << getCoproc() << ">";
2190    break;
2191  case k_CoprocReg:
2192    OS << "<coprocessor register: " << getCoproc() << ">";
2193    break;
2194  case k_CoprocOption:
2195    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2196    break;
2197  case k_MSRMask:
2198    OS << "<mask: " << getMSRMask() << ">";
2199    break;
2200  case k_Immediate:
2201    getImm()->print(OS);
2202    break;
2203  case k_MemBarrierOpt:
2204    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2205    break;
2206  case k_Memory:
2207    OS << "<memory "
2208       << " base:" << Memory.BaseRegNum;
2209    OS << ">";
2210    break;
2211  case k_PostIndexRegister:
2212    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2213       << PostIdxReg.RegNum;
2214    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2215      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2216         << PostIdxReg.ShiftImm;
2217    OS << ">";
2218    break;
2219  case k_ProcIFlags: {
2220    OS << "<ARM_PROC::";
2221    unsigned IFlags = getProcIFlags();
2222    for (int i=2; i >= 0; --i)
2223      if (IFlags & (1 << i))
2224        OS << ARM_PROC::IFlagsToString(1 << i);
2225    OS << ">";
2226    break;
2227  }
2228  case k_Register:
2229    OS << "<register " << getReg() << ">";
2230    break;
2231  case k_ShifterImmediate:
2232    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2233       << " #" << ShifterImm.Imm << ">";
2234    break;
2235  case k_ShiftedRegister:
2236    OS << "<so_reg_reg "
2237       << RegShiftedReg.SrcReg << " "
2238       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2239       << " " << RegShiftedReg.ShiftReg << ">";
2240    break;
2241  case k_ShiftedImmediate:
2242    OS << "<so_reg_imm "
2243       << RegShiftedImm.SrcReg << " "
2244       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2245       << " #" << RegShiftedImm.ShiftImm << ">";
2246    break;
2247  case k_RotateImmediate:
2248    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2249    break;
2250  case k_BitfieldDescriptor:
2251    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2252       << ", width: " << Bitfield.Width << ">";
2253    break;
2254  case k_RegisterList:
2255  case k_DPRRegisterList:
2256  case k_SPRRegisterList: {
2257    OS << "<register_list ";
2258
2259    const SmallVectorImpl<unsigned> &RegList = getRegList();
2260    for (SmallVectorImpl<unsigned>::const_iterator
2261           I = RegList.begin(), E = RegList.end(); I != E; ) {
2262      OS << *I;
2263      if (++I < E) OS << ", ";
2264    }
2265
2266    OS << ">";
2267    break;
2268  }
2269  case k_VectorList:
2270    OS << "<vector_list " << VectorList.Count << " * "
2271       << VectorList.RegNum << ">";
2272    break;
2273  case k_VectorListAllLanes:
2274    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2275       << VectorList.RegNum << ">";
2276    break;
2277  case k_VectorListIndexed:
2278    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2279       << VectorList.Count << " * " << VectorList.RegNum << ">";
2280    break;
2281  case k_Token:
2282    OS << "'" << getToken() << "'";
2283    break;
2284  case k_VectorIndex:
2285    OS << "<vectorindex " << getVectorIndex() << ">";
2286    break;
2287  }
2288}
2289
2290/// @name Auto-generated Match Functions
2291/// {
2292
2293static unsigned MatchRegisterName(StringRef Name);
2294
2295/// }
2296
2297bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2298                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2299  StartLoc = Parser.getTok().getLoc();
2300  RegNo = tryParseRegister();
2301  EndLoc = Parser.getTok().getLoc();
2302
2303  return (RegNo == (unsigned)-1);
2304}
2305
2306/// Try to parse a register name.  The token must be an Identifier when called,
2307/// and if it is a register name the token is eaten and the register number is
2308/// returned.  Otherwise return -1.
2309///
2310int ARMAsmParser::tryParseRegister() {
2311  const AsmToken &Tok = Parser.getTok();
2312  if (Tok.isNot(AsmToken::Identifier)) return -1;
2313
2314  std::string lowerCase = Tok.getString().lower();
2315  unsigned RegNum = MatchRegisterName(lowerCase);
2316  if (!RegNum) {
2317    RegNum = StringSwitch<unsigned>(lowerCase)
2318      .Case("r13", ARM::SP)
2319      .Case("r14", ARM::LR)
2320      .Case("r15", ARM::PC)
2321      .Case("ip", ARM::R12)
2322      // Additional register name aliases for 'gas' compatibility.
2323      .Case("a1", ARM::R0)
2324      .Case("a2", ARM::R1)
2325      .Case("a3", ARM::R2)
2326      .Case("a4", ARM::R3)
2327      .Case("v1", ARM::R4)
2328      .Case("v2", ARM::R5)
2329      .Case("v3", ARM::R6)
2330      .Case("v4", ARM::R7)
2331      .Case("v5", ARM::R8)
2332      .Case("v6", ARM::R9)
2333      .Case("v7", ARM::R10)
2334      .Case("v8", ARM::R11)
2335      .Case("sb", ARM::R9)
2336      .Case("sl", ARM::R10)
2337      .Case("fp", ARM::R11)
2338      .Default(0);
2339  }
2340  if (!RegNum) {
2341    // Check for aliases registered via .req. Canonicalize to lower case.
2342    // That's more consistent since register names are case insensitive, and
2343    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2344    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2345    // If no match, return failure.
2346    if (Entry == RegisterReqs.end())
2347      return -1;
2348    Parser.Lex(); // Eat identifier token.
2349    return Entry->getValue();
2350  }
2351
2352  Parser.Lex(); // Eat identifier token.
2353
2354  return RegNum;
2355}
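
// A few illustrative inputs for tryParseRegister (not exhaustive):
//   "r3" / "R3" -> ARM::R3 (MatchRegisterName on the lower-cased token)
//   "fp" -> ARM::R11, "ip" -> ARM::R12            (gas-style aliases)
//   "foo" -> whatever a prior ".req" mapped it to, otherwise -1
// On a match the identifier token is consumed; on failure it is left alone.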
2356
2357// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2358// If a recoverable error occurs, return 1. If an irrecoverable error
2359// occurs, return -1. An irrecoverable error is one where tokens have been
2360// consumed in the process of trying to parse the shifter (i.e., when it is
2361// indeed a shifter operand, but malformed).
2362int ARMAsmParser::tryParseShiftRegister(
2363                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2364  SMLoc S = Parser.getTok().getLoc();
2365  const AsmToken &Tok = Parser.getTok();
2366  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2367
2368  std::string lowerCase = Tok.getString().lower();
2369  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2370      .Case("asl", ARM_AM::lsl)
2371      .Case("lsl", ARM_AM::lsl)
2372      .Case("lsr", ARM_AM::lsr)
2373      .Case("asr", ARM_AM::asr)
2374      .Case("ror", ARM_AM::ror)
2375      .Case("rrx", ARM_AM::rrx)
2376      .Default(ARM_AM::no_shift);
2377
2378  if (ShiftTy == ARM_AM::no_shift)
2379    return 1;
2380
2381  Parser.Lex(); // Eat the operator.
2382
2383  // The source register for the shift has already been added to the
2384  // operand list, so we need to pop it off and combine it into the shifted
2385  // register operand instead.
2386  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2387  if (!PrevOp->isReg())
2388    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2389  int SrcReg = PrevOp->getReg();
2390  int64_t Imm = 0;
2391  int ShiftReg = 0;
2392  if (ShiftTy == ARM_AM::rrx) {
2393    // RRX doesn't have an explicit shift amount. The encoder expects
2394    // the shift register to be the same as the source register. Seems odd,
2395    // but OK.
2396    ShiftReg = SrcReg;
2397  } else {
2398    // Figure out if this is shifted by a constant or a register (for non-RRX).
2399    if (Parser.getTok().is(AsmToken::Hash) ||
2400        Parser.getTok().is(AsmToken::Dollar)) {
2401      Parser.Lex(); // Eat hash.
2402      SMLoc ImmLoc = Parser.getTok().getLoc();
2403      const MCExpr *ShiftExpr = 0;
2404      if (getParser().ParseExpression(ShiftExpr)) {
2405        Error(ImmLoc, "invalid immediate shift value");
2406        return -1;
2407      }
2408      // The expression must be evaluatable as an immediate.
2409      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2410      if (!CE) {
2411        Error(ImmLoc, "invalid immediate shift value");
2412        return -1;
2413      }
2414      // Range check the immediate.
2415      // lsl, ror: 0 <= imm <= 31
2416      // lsr, asr: 0 <= imm <= 32
2417      Imm = CE->getValue();
2418      if (Imm < 0 ||
2419          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2420          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2421        Error(ImmLoc, "immediate shift value out of range");
2422        return -1;
2423      }
2424      // shift by zero is a nop. Always send it through as lsl.
2425      // ('as' compatibility)
2426      if (Imm == 0)
2427        ShiftTy = ARM_AM::lsl;
2428    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2429      ShiftReg = tryParseRegister();
2430      SMLoc L = Parser.getTok().getLoc();
2431      if (ShiftReg == -1) {
2432        Error(L, "expected immediate or register in shift operand");
2433        return -1;
2434      }
2435    } else {
2436      Error(Parser.getTok().getLoc(),
2437            "expected immediate or register in shift operand");
2438      return -1;
2439    }
2440  }
2441
2442  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2443    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2444                                                         ShiftReg, Imm,
2445                                               S, Parser.getTok().getLoc()));
2446  else
2447    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2448                                               S, Parser.getTok().getLoc()));
2449
2450  return 0;
2451}
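
// Worked examples for tryParseShiftRegister (illustrative): given that the
// source register operand "r0" has already been pushed,
//   "lsl #4" -> r0 is popped and re-added as a shifted-immediate (lsl, 4)
//   "asr r1" -> r0 is popped and re-added as a shifted-register (asr, r1)
//   "rrx"    -> re-added as a shifted-immediate with shift type rrx
//   "ror #0" -> canonicalized to lsl with a zero amount ('as' compatibility)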
2452
2453
2454/// Try to parse a register name.  The token must be an Identifier when called.
2455/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2456/// if there is a "writeback". Returns 'true' if it's not a register.
2457///
2458/// TODO: this is likely to change to allow different register types and/or to
2459/// parse for a specific register type.
2460bool ARMAsmParser::
2461tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2462  SMLoc S = Parser.getTok().getLoc();
2463  int RegNo = tryParseRegister();
2464  if (RegNo == -1)
2465    return true;
2466
2467  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2468
2469  const AsmToken &ExclaimTok = Parser.getTok();
2470  if (ExclaimTok.is(AsmToken::Exclaim)) {
2471    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2472                                               ExclaimTok.getLoc()));
2473    Parser.Lex(); // Eat exclaim token
2474    return false;
2475  }
2476
2477  // Also check for an index operand. This is only legal for vector registers,
2478  // but that'll get caught OK in operand matching, so we don't need to
2479  // explicitly filter everything else out here.
2480  if (Parser.getTok().is(AsmToken::LBrac)) {
2481    SMLoc SIdx = Parser.getTok().getLoc();
2482    Parser.Lex(); // Eat left bracket token.
2483
2484    const MCExpr *ImmVal;
2485    if (getParser().ParseExpression(ImmVal))
2486      return true;
2487    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2488    if (!MCE) {
2489      TokError("immediate value expected for vector index");
2490      return true;
2491    }
2492
2493    SMLoc E = Parser.getTok().getLoc();
2494    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2495      Error(E, "']' expected");
2496      return true;
2497    }
2498
2499    Parser.Lex(); // Eat right bracket token.
2500
2501    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2502                                                     SIdx, E,
2503                                                     getContext()));
2504  }
2505
2506  return false;
2507}
2508
2509/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2510/// instruction's symbolic operand name. Example: "p1", "p7", "c3",
2511/// "c5", ...
2512static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2513  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2514  // but efficient.
2515  switch (Name.size()) {
2516  default: break;
2517  case 2:
2518    if (Name[0] != CoprocOp)
2519      return -1;
2520    switch (Name[1]) {
2521    default:  return -1;
2522    case '0': return 0;
2523    case '1': return 1;
2524    case '2': return 2;
2525    case '3': return 3;
2526    case '4': return 4;
2527    case '5': return 5;
2528    case '6': return 6;
2529    case '7': return 7;
2530    case '8': return 8;
2531    case '9': return 9;
2532    }
2533    break;
2534  case 3:
2535    if (Name[0] != CoprocOp || Name[1] != '1')
2536      return -1;
2537    switch (Name[2]) {
2538    default:  return -1;
2539    case '0': return 10;
2540    case '1': return 11;
2541    case '2': return 12;
2542    case '3': return 13;
2543    case '4': return 14;
2544    case '5': return 15;
2545    }
2546    break;
2547  }
2548
2549  return -1;
2550}
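
// For illustration: MatchCoprocessorOperandName("p15", 'p') == 15,
// MatchCoprocessorOperandName("c7", 'c') == 7, and anything that is not
// CoprocOp followed by a decimal 0-15 (e.g. "p16", "q1") returns -1.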
2551
2552/// parseITCondCode - Try to parse a condition code for an IT instruction.
2553ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2554parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2555  SMLoc S = Parser.getTok().getLoc();
2556  const AsmToken &Tok = Parser.getTok();
2557  if (!Tok.is(AsmToken::Identifier))
2558    return MatchOperand_NoMatch;
2559  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2560    .Case("eq", ARMCC::EQ)
2561    .Case("ne", ARMCC::NE)
2562    .Case("hs", ARMCC::HS)
2563    .Case("cs", ARMCC::HS)
2564    .Case("lo", ARMCC::LO)
2565    .Case("cc", ARMCC::LO)
2566    .Case("mi", ARMCC::MI)
2567    .Case("pl", ARMCC::PL)
2568    .Case("vs", ARMCC::VS)
2569    .Case("vc", ARMCC::VC)
2570    .Case("hi", ARMCC::HI)
2571    .Case("ls", ARMCC::LS)
2572    .Case("ge", ARMCC::GE)
2573    .Case("lt", ARMCC::LT)
2574    .Case("gt", ARMCC::GT)
2575    .Case("le", ARMCC::LE)
2576    .Case("al", ARMCC::AL)
2577    .Default(~0U);
2578  if (CC == ~0U)
2579    return MatchOperand_NoMatch;
2580  Parser.Lex(); // Eat the token.
2581
2582  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2583
2584  return MatchOperand_Success;
2585}
2586
2587/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2588/// token must be an Identifier when called, and if it is a coprocessor
2589/// number, the token is eaten and the operand is added to the operand list.
2590ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2591parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2592  SMLoc S = Parser.getTok().getLoc();
2593  const AsmToken &Tok = Parser.getTok();
2594  if (Tok.isNot(AsmToken::Identifier))
2595    return MatchOperand_NoMatch;
2596
2597  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2598  if (Num == -1)
2599    return MatchOperand_NoMatch;
2600
2601  Parser.Lex(); // Eat identifier token.
2602  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2603  return MatchOperand_Success;
2604}
2605
2606/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2607/// token must be an Identifier when called, and if it is a coprocessor
2608/// register, the token is eaten and the operand is added to the operand list.
2609ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2610parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2611  SMLoc S = Parser.getTok().getLoc();
2612  const AsmToken &Tok = Parser.getTok();
2613  if (Tok.isNot(AsmToken::Identifier))
2614    return MatchOperand_NoMatch;
2615
2616  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2617  if (Reg == -1)
2618    return MatchOperand_NoMatch;
2619
2620  Parser.Lex(); // Eat identifier token.
2621  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2622  return MatchOperand_Success;
2623}
2624
2625/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2626/// coproc_option : '{' imm0_255 '}'
2627ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2628parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2629  SMLoc S = Parser.getTok().getLoc();
2630
2631  // If this isn't a '{', this isn't a coprocessor immediate operand.
2632  if (Parser.getTok().isNot(AsmToken::LCurly))
2633    return MatchOperand_NoMatch;
2634  Parser.Lex(); // Eat the '{'
2635
2636  const MCExpr *Expr;
2637  SMLoc Loc = Parser.getTok().getLoc();
2638  if (getParser().ParseExpression(Expr)) {
2639    Error(Loc, "illegal expression");
2640    return MatchOperand_ParseFail;
2641  }
2642  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2643  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2644    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2645    return MatchOperand_ParseFail;
2646  }
2647  int Val = CE->getValue();
2648
2649  // Check for and consume the closing '}'
2650  if (Parser.getTok().isNot(AsmToken::RCurly))
2651    return MatchOperand_ParseFail;
2652  SMLoc E = Parser.getTok().getLoc();
2653  Parser.Lex(); // Eat the '}'
2654
2655  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2656  return MatchOperand_Success;
2657}
2658
2659// For register list parsing, we need to map from raw GPR register numbering
2660// to the enumeration values. The enumeration values aren't sorted by
2661// register number due to our using "sp", "lr" and "pc" as canonical names.
2662static unsigned getNextRegister(unsigned Reg) {
2663  // If this is a GPR, we need to do it manually, otherwise we can rely
2664  // on the sort ordering of the enumeration since the other reg-classes
2665  // are sane.
2666  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2667    return Reg + 1;
2668  switch(Reg) {
2669  default: assert(0 && "Invalid GPR number!");
2670  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2671  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2672  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2673  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2674  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2675  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2676  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2677  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2678  }
2679}
2680
2681// Return the low-subreg of a given Q register.
2682static unsigned getDRegFromQReg(unsigned QReg) {
2683  switch (QReg) {
2684  default: llvm_unreachable("expected a Q register!");
2685  case ARM::Q0:  return ARM::D0;
2686  case ARM::Q1:  return ARM::D2;
2687  case ARM::Q2:  return ARM::D4;
2688  case ARM::Q3:  return ARM::D6;
2689  case ARM::Q4:  return ARM::D8;
2690  case ARM::Q5:  return ARM::D10;
2691  case ARM::Q6:  return ARM::D12;
2692  case ARM::Q7:  return ARM::D14;
2693  case ARM::Q8:  return ARM::D16;
2694  case ARM::Q9:  return ARM::D18;
2695  case ARM::Q10: return ARM::D20;
2696  case ARM::Q11: return ARM::D22;
2697  case ARM::Q12: return ARM::D24;
2698  case ARM::Q13: return ARM::D26;
2699  case ARM::Q14: return ARM::D28;
2700  case ARM::Q15: return ARM::D30;
2701  }
2702}
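
// Equivalently: Qn maps to D(2*n), e.g. Q3 -> D6 and Q15 -> D30.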
2703
2704/// Parse a register list.
2705bool ARMAsmParser::
2706parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2707  assert(Parser.getTok().is(AsmToken::LCurly) &&
2708         "Token is not a Left Curly Brace");
2709  SMLoc S = Parser.getTok().getLoc();
2710  Parser.Lex(); // Eat '{' token.
2711  SMLoc RegLoc = Parser.getTok().getLoc();
2712
2713  // Check the first register in the list to see what register class
2714  // this is a list of.
2715  int Reg = tryParseRegister();
2716  if (Reg == -1)
2717    return Error(RegLoc, "register expected");
2718
2719  // The reglist instructions have at most 16 registers, so reserve
2720  // space for that many.
2721  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2722
2723  // Allow Q regs and just interpret them as the two D sub-registers.
2724  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2725    Reg = getDRegFromQReg(Reg);
2726    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2727    ++Reg;
2728  }
2729  const MCRegisterClass *RC;
2730  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2731    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2732  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2733    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2734  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2735    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2736  else
2737    return Error(RegLoc, "invalid register in register list");
2738
2739  // Store the register.
2740  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2741
2742  // This starts immediately after the first register token in the list,
2743  // so we can see either a comma or a minus (range separator) as a legal
2744  // next token.
2745  while (Parser.getTok().is(AsmToken::Comma) ||
2746         Parser.getTok().is(AsmToken::Minus)) {
2747    if (Parser.getTok().is(AsmToken::Minus)) {
2748      Parser.Lex(); // Eat the minus.
2749      SMLoc EndLoc = Parser.getTok().getLoc();
2750      int EndReg = tryParseRegister();
2751      if (EndReg == -1)
2752        return Error(EndLoc, "register expected");
2753      // Allow Q regs and just interpret them as the two D sub-registers.
2754      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2755        EndReg = getDRegFromQReg(EndReg) + 1;
2756      // If the register is the same as the start reg, there's nothing
2757      // more to do.
2758      if (Reg == EndReg)
2759        continue;
2760      // The register must be in the same register class as the first.
2761      if (!RC->contains(EndReg))
2762        return Error(EndLoc, "invalid register in register list");
2763      // Ranges must go from low to high.
2764      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2765        return Error(EndLoc, "bad range in register list");
2766
2767      // Add all the registers in the range to the register list.
2768      while (Reg != EndReg) {
2769        Reg = getNextRegister(Reg);
2770        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2771      }
2772      continue;
2773    }
2774    Parser.Lex(); // Eat the comma.
2775    RegLoc = Parser.getTok().getLoc();
2776    int OldReg = Reg;
2777    const AsmToken RegTok = Parser.getTok();
2778    Reg = tryParseRegister();
2779    if (Reg == -1)
2780      return Error(RegLoc, "register expected");
2781    // Allow Q regs and just interpret them as the two D sub-registers.
2782    bool isQReg = false;
2783    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2784      Reg = getDRegFromQReg(Reg);
2785      isQReg = true;
2786    }
2787    // The register must be in the same register class as the first.
2788    if (!RC->contains(Reg))
2789      return Error(RegLoc, "invalid register in register list");
2790    // List must be monotonically increasing.
2791    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2792      return Error(RegLoc, "register list not in ascending order");
2793    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2794      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2795              ") in register list");
2796      continue;
2797    }
2798    // VFP register lists must also be contiguous.
2799    // It's OK to use the enumeration values directly here, as the
2800    // VFP register classes have the enum sorted properly.
2801    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2802        Reg != OldReg + 1)
2803      return Error(RegLoc, "non-contiguous register range");
2804    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2805    if (isQReg)
2806      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2807  }
2808
2809  SMLoc E = Parser.getTok().getLoc();
2810  if (Parser.getTok().isNot(AsmToken::RCurly))
2811    return Error(E, "'}' expected");
2812  Parser.Lex(); // Eat '}' token.
2813
2814  // Push the register list operand.
2815  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2816
2817  // The ARM system instruction variants for LDM/STM have a '^' token here.
2818  if (Parser.getTok().is(AsmToken::Caret)) {
2819    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2820    Parser.Lex(); // Eat '^' token.
2821  }
2822
2823  return false;
2824}
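
// A couple of illustrative parses (sketch only):
//   "{r0, r2-r4, lr}" -> register-list operand containing R0, R2, R3, R4, LR
//   "{q1}"            -> accepted and expanded to the D-register list D2, D3
//   "{r0, r2-r1}"     -> rejected with "bad range in register list"
// An LDM/STM system-variant '^' after the '}' becomes a separate token
// operand.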
2825
2826// Helper function to parse the lane index for vector lists.
2827ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2828parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2829  Index = 0; // Always return a defined index value.
2830  if (Parser.getTok().is(AsmToken::LBrac)) {
2831    Parser.Lex(); // Eat the '['.
2832    if (Parser.getTok().is(AsmToken::RBrac)) {
2833      // "Dn[]" is the 'all lanes' syntax.
2834      LaneKind = AllLanes;
2835      Parser.Lex(); // Eat the ']'.
2836      return MatchOperand_Success;
2837    }
2838    const MCExpr *LaneIndex;
2839    SMLoc Loc = Parser.getTok().getLoc();
2840    if (getParser().ParseExpression(LaneIndex)) {
2841      Error(Loc, "illegal expression");
2842      return MatchOperand_ParseFail;
2843    }
2844    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2845    if (!CE) {
2846      Error(Loc, "lane index must be empty or an integer");
2847      return MatchOperand_ParseFail;
2848    }
2849    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2850      Error(Parser.getTok().getLoc(), "']' expected");
2851      return MatchOperand_ParseFail;
2852    }
2853    Parser.Lex(); // Eat the ']'.
2854    int64_t Val = CE->getValue();
2855
2856    // FIXME: Make this range check context sensitive for .8, .16, .32.
2857    if (Val < 0 || Val > 7) {
2858      Error(Parser.getTok().getLoc(), "lane index out of range");
2859      return MatchOperand_ParseFail;
2860    }
2861    Index = Val;
2862    LaneKind = IndexedLane;
2863    return MatchOperand_Success;
2864  }
2865  LaneKind = NoLanes;
2866  return MatchOperand_Success;
2867}
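
// Illustrative inputs for parseVectorLane, picking up right after the
// register itself has been parsed:
//   (no '[')  -> NoLanes
//   "[]"      -> AllLanes
//   "[3]"     -> IndexedLane, Index = 3
//   "[9]"     -> error: lane index out of range (see the FIXME above)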
2868
2869// Parse a vector register list.
2870ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2871parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2872  VectorLaneTy LaneKind;
2873  unsigned LaneIndex;
2874  SMLoc S = Parser.getTok().getLoc();
2875  // As an extension (to match gas), support a plain D register or Q register
2876  // (without enclosing curly braces) as a single or double entry list,
2877  // respectively.
2878  if (Parser.getTok().is(AsmToken::Identifier)) {
2879    int Reg = tryParseRegister();
2880    if (Reg == -1)
2881      return MatchOperand_NoMatch;
2882    SMLoc E = Parser.getTok().getLoc();
2883    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2884      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2885      if (Res != MatchOperand_Success)
2886        return Res;
2887      switch (LaneKind) {
2888      default:
2889        assert(0 && "unexpected lane kind!");
2890      case NoLanes:
2891        E = Parser.getTok().getLoc();
2892        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2893        break;
2894      case AllLanes:
2895        E = Parser.getTok().getLoc();
2896        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2897                                                                S, E));
2898        break;
2899      case IndexedLane:
2900        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2901                                                               LaneIndex,
2902                                                               false, S, E));
2903        break;
2904      }
2905      return MatchOperand_Success;
2906    }
2907    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2908      Reg = getDRegFromQReg(Reg);
2909      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2910      if (Res != MatchOperand_Success)
2911        return Res;
2912      switch (LaneKind) {
2913      default:
2914        assert(0 && "unexpected lane kind!");
2915      case NoLanes:
2916        E = Parser.getTok().getLoc();
2917        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2918        break;
2919      case AllLanes:
2920        E = Parser.getTok().getLoc();
2921        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2922                                                                S, E));
2923        break;
2924      case IndexedLane:
2925        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2926                                                               LaneIndex,
2927                                                               false, S, E));
2928        break;
2929      }
2930      return MatchOperand_Success;
2931    }
2932    Error(S, "vector register expected");
2933    return MatchOperand_ParseFail;
2934  }
2935
2936  if (Parser.getTok().isNot(AsmToken::LCurly))
2937    return MatchOperand_NoMatch;
2938
2939  Parser.Lex(); // Eat '{' token.
2940  SMLoc RegLoc = Parser.getTok().getLoc();
2941
2942  int Reg = tryParseRegister();
2943  if (Reg == -1) {
2944    Error(RegLoc, "register expected");
2945    return MatchOperand_ParseFail;
2946  }
2947  unsigned Count = 1;
2948  int Spacing = 0;
2949  unsigned FirstReg = Reg;
2950  // The list is of D registers, but we also allow Q regs and just interpret
2951  // them as the two D sub-registers.
2952  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2953    FirstReg = Reg = getDRegFromQReg(Reg);
2954    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2955                 // it's ambiguous with four-register single spaced.
2956    ++Reg;
2957    ++Count;
2958  }
2959  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2960    return MatchOperand_ParseFail;
2961
2962  while (Parser.getTok().is(AsmToken::Comma) ||
2963         Parser.getTok().is(AsmToken::Minus)) {
2964    if (Parser.getTok().is(AsmToken::Minus)) {
2965      if (!Spacing)
2966        Spacing = 1; // Register range implies a single spaced list.
2967      else if (Spacing == 2) {
2968        Error(Parser.getTok().getLoc(),
2969              "sequential registers in double spaced list");
2970        return MatchOperand_ParseFail;
2971      }
2972      Parser.Lex(); // Eat the minus.
2973      SMLoc EndLoc = Parser.getTok().getLoc();
2974      int EndReg = tryParseRegister();
2975      if (EndReg == -1) {
2976        Error(EndLoc, "register expected");
2977        return MatchOperand_ParseFail;
2978      }
2979      // Allow Q regs and just interpret them as the two D sub-registers.
2980      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2981        EndReg = getDRegFromQReg(EndReg) + 1;
2982      // If the register is the same as the start reg, there's nothing
2983      // more to do.
2984      if (Reg == EndReg)
2985        continue;
2986      // The register must be in the same register class as the first.
2987      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2988        Error(EndLoc, "invalid register in register list");
2989        return MatchOperand_ParseFail;
2990      }
2991      // Ranges must go from low to high.
2992      if (Reg > EndReg) {
2993        Error(EndLoc, "bad range in register list");
2994        return MatchOperand_ParseFail;
2995      }
2996      // Parse the lane specifier if present.
2997      VectorLaneTy NextLaneKind;
2998      unsigned NextLaneIndex;
2999      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3000        return MatchOperand_ParseFail;
3001      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3002        Error(EndLoc, "mismatched lane index in register list");
3003        return MatchOperand_ParseFail;
3004      }
3005      EndLoc = Parser.getTok().getLoc();
3006
3007      // Add all the registers in the range to the register list.
3008      Count += EndReg - Reg;
3009      Reg = EndReg;
3010      continue;
3011    }
3012    Parser.Lex(); // Eat the comma.
3013    RegLoc = Parser.getTok().getLoc();
3014    int OldReg = Reg;
3015    Reg = tryParseRegister();
3016    if (Reg == -1) {
3017      Error(RegLoc, "register expected");
3018      return MatchOperand_ParseFail;
3019    }
3020    // Vector register lists must be contiguous.
3021    // It's OK to use the enumeration values directly here, as the
3022    // VFP register classes have the enum sorted properly.
3023    //
3024    // The list is of D registers, but we also allow Q regs and just interpret
3025    // them as the two D sub-registers.
3026    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3027      if (!Spacing)
3028        Spacing = 1; // Register range implies a single spaced list.
3029      else if (Spacing == 2) {
3030        Error(RegLoc,
3031              "invalid register in double-spaced list (must be 'D' register)");
3032        return MatchOperand_ParseFail;
3033      }
3034      Reg = getDRegFromQReg(Reg);
3035      if (Reg != OldReg + 1) {
3036        Error(RegLoc, "non-contiguous register range");
3037        return MatchOperand_ParseFail;
3038      }
3039      ++Reg;
3040      Count += 2;
3041      // Parse the lane specifier if present.
3042      VectorLaneTy NextLaneKind;
3043      unsigned NextLaneIndex;
3044      SMLoc EndLoc = Parser.getTok().getLoc();
3045      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3046        return MatchOperand_ParseFail;
3047      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3048        Error(EndLoc, "mismatched lane index in register list");
3049        return MatchOperand_ParseFail;
3050      }
3051      continue;
3052    }
3053    // Normal D register.
3054    // Figure out the register spacing (single or double) of the list if
3055    // we don't know it already.
3056    if (!Spacing)
3057      Spacing = 1 + (Reg == OldReg + 2);
3058
3059    // Just check that it's contiguous and keep going.
3060    if (Reg != OldReg + Spacing) {
3061      Error(RegLoc, "non-contiguous register range");
3062      return MatchOperand_ParseFail;
3063    }
3064    ++Count;
3065    // Parse the lane specifier if present.
3066    VectorLaneTy NextLaneKind;
3067    unsigned NextLaneIndex;
3068    SMLoc EndLoc = Parser.getTok().getLoc();
3069    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3070      return MatchOperand_ParseFail;
3071    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3072      Error(EndLoc, "mismatched lane index in register list");
3073      return MatchOperand_ParseFail;
3074    }
3075  }
3076
3077  SMLoc E = Parser.getTok().getLoc();
3078  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3079    Error(E, "'}' expected");
3080    return MatchOperand_ParseFail;
3081  }
3082  Parser.Lex(); // Eat '}' token.
3083
3084  switch (LaneKind) {
3085  default:
3086    assert(0 && "unexpected lane kind in register list.");
3087  case NoLanes:
3088    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3089                                                    (Spacing == 2), S, E));
3090    break;
3091  case AllLanes:
3092    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3093                                                            (Spacing == 2),
3094                                                            S, E));
3095    break;
3096  case IndexedLane:
3097    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3098                                                           LaneIndex,
3099                                                           (Spacing == 2),
3100                                                           S, E));
3101    break;
3102  }
3103  return MatchOperand_Success;
3104}
3105
3106/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
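/// For example (illustrative): "dmb ish", "dsb sy", "dmb oshst".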
3107ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3108parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3109  SMLoc S = Parser.getTok().getLoc();
3110  const AsmToken &Tok = Parser.getTok();
3111  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3112  StringRef OptStr = Tok.getString();
3113
3114  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3115    .Case("sy",    ARM_MB::SY)
3116    .Case("st",    ARM_MB::ST)
3117    .Case("sh",    ARM_MB::ISH)
3118    .Case("ish",   ARM_MB::ISH)
3119    .Case("shst",  ARM_MB::ISHST)
3120    .Case("ishst", ARM_MB::ISHST)
3121    .Case("nsh",   ARM_MB::NSH)
3122    .Case("un",    ARM_MB::NSH)
3123    .Case("nshst", ARM_MB::NSHST)
3124    .Case("unst",  ARM_MB::NSHST)
3125    .Case("osh",   ARM_MB::OSH)
3126    .Case("oshst", ARM_MB::OSHST)
3127    .Default(~0U);
3128
3129  if (Opt == ~0U)
3130    return MatchOperand_NoMatch;
3131
3132  Parser.Lex(); // Eat identifier token.
3133  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3134  return MatchOperand_Success;
3135}
3136
3137/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
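/// For example (illustrative): "cpsie i" and "cpsid aif"; an iflags string of
/// "none" (no bits set) is also accepted.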
3138ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3139parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3140  SMLoc S = Parser.getTok().getLoc();
3141  const AsmToken &Tok = Parser.getTok();
3142  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3143  StringRef IFlagsStr = Tok.getString();
3144
3145  // An iflags string of "none" is interpreted to mean that none of the AIF
3146  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3147  unsigned IFlags = 0;
3148  if (IFlagsStr != "none") {
3149    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3150      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3151        .Case("a", ARM_PROC::A)
3152        .Case("i", ARM_PROC::I)
3153        .Case("f", ARM_PROC::F)
3154        .Default(~0U);
3155
3156      // If some specific iflag is already set, it means that some letter is
3157      // present more than once, which is not acceptable.
3158      if (Flag == ~0U || (IFlags & Flag))
3159        return MatchOperand_NoMatch;
3160
3161      IFlags |= Flag;
3162    }
3163  }
3164
3165  Parser.Lex(); // Eat identifier token.
3166  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3167  return MatchOperand_Success;
3168}
3169
3170/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
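/// For example (illustrative): "msr apsr_nzcvq, r0", "msr cpsr_fc, r0",
/// "msr spsr_fsxc, r0", and on M-class cores "msr primask, r0".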
3171ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3172parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3173  SMLoc S = Parser.getTok().getLoc();
3174  const AsmToken &Tok = Parser.getTok();
3175  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3176  StringRef Mask = Tok.getString();
3177
3178  if (isMClass()) {
3179    // See ARMv6-M 10.1.1
3180    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3181      .Case("apsr", 0)
3182      .Case("iapsr", 1)
3183      .Case("eapsr", 2)
3184      .Case("xpsr", 3)
3185      .Case("ipsr", 5)
3186      .Case("epsr", 6)
3187      .Case("iepsr", 7)
3188      .Case("msp", 8)
3189      .Case("psp", 9)
3190      .Case("primask", 16)
3191      .Case("basepri", 17)
3192      .Case("basepri_max", 18)
3193      .Case("faultmask", 19)
3194      .Case("control", 20)
3195      .Default(~0U);
3196
3197    if (FlagsVal == ~0U)
3198      return MatchOperand_NoMatch;
3199
3200    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3201      // basepri, basepri_max and faultmask are only valid for ARMv7-M.
3202      return MatchOperand_NoMatch;
3203
3204    Parser.Lex(); // Eat identifier token.
3205    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3206    return MatchOperand_Success;
3207  }
3208
3209  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3210  size_t Start = 0, Next = Mask.find('_');
3211  StringRef Flags = "";
3212  std::string SpecReg = Mask.slice(Start, Next).lower();
3213  if (Next != StringRef::npos)
3214    Flags = Mask.slice(Next+1, Mask.size());
3215
3216  // FlagsVal contains the complete mask:
3217  // 3-0: Mask
3218  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3219  unsigned FlagsVal = 0;
3220
3221  if (SpecReg == "apsr") {
3222    FlagsVal = StringSwitch<unsigned>(Flags)
3223    .Case("nzcvq",  0x8) // same as CPSR_f
3224    .Case("g",      0x4) // same as CPSR_s
3225    .Case("nzcvqg", 0xc) // same as CPSR_fs
3226    .Default(~0U);
3227
3228    if (FlagsVal == ~0U) {
3229      if (!Flags.empty())
3230        return MatchOperand_NoMatch;
3231      else
3232        FlagsVal = 8; // No flag
3233    }
3234  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3235    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3236      Flags = "fc";
3237    for (int i = 0, e = Flags.size(); i != e; ++i) {
3238      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3239      .Case("c", 1)
3240      .Case("x", 2)
3241      .Case("s", 4)
3242      .Case("f", 8)
3243      .Default(~0U);
3244
3245      // If some specific flag is already set, it means that some letter is
3246      // present more than once, which is not acceptable.
3247      if (Flag == ~0U || (FlagsVal & Flag))
3248        return MatchOperand_NoMatch;
3249      FlagsVal |= Flag;
3250    }
3251  } else // No match for special register.
3252    return MatchOperand_NoMatch;
3253
3254  // Special register without flags is NOT equivalent to "fc" flags.
3255  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3256  // two lines would enable gas compatibility at the expense of breaking
3257  // round-tripping.
3258  //
3259  // if (!FlagsVal)
3260  //  FlagsVal = 0x9;
3261
3262  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3263  if (SpecReg == "spsr")
3264    FlagsVal |= 16;
3265
3266  Parser.Lex(); // Eat identifier token.
3267  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3268  return MatchOperand_Success;
3269}
3270
3271ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3272parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3273            int Low, int High) {
3274  const AsmToken &Tok = Parser.getTok();
3275  if (Tok.isNot(AsmToken::Identifier)) {
3276    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3277    return MatchOperand_ParseFail;
3278  }
3279  StringRef ShiftName = Tok.getString();
3280  std::string LowerOp = Op.lower();
3281  std::string UpperOp = Op.upper();
3282  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3283    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3284    return MatchOperand_ParseFail;
3285  }
3286  Parser.Lex(); // Eat shift type token.
3287
3288  // There must be a '#' and a shift amount.
3289  if (Parser.getTok().isNot(AsmToken::Hash) &&
3290      Parser.getTok().isNot(AsmToken::Dollar)) {
3291    Error(Parser.getTok().getLoc(), "'#' expected");
3292    return MatchOperand_ParseFail;
3293  }
3294  Parser.Lex(); // Eat hash token.
3295
3296  const MCExpr *ShiftAmount;
3297  SMLoc Loc = Parser.getTok().getLoc();
3298  if (getParser().ParseExpression(ShiftAmount)) {
3299    Error(Loc, "illegal expression");
3300    return MatchOperand_ParseFail;
3301  }
3302  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3303  if (!CE) {
3304    Error(Loc, "constant expression expected");
3305    return MatchOperand_ParseFail;
3306  }
3307  int Val = CE->getValue();
3308  if (Val < Low || Val > High) {
3309    Error(Loc, "immediate value out of range");
3310    return MatchOperand_ParseFail;
3311  }
3312
3313  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3314
3315  return MatchOperand_Success;
3316}
3317
3318ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3319parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3320  const AsmToken &Tok = Parser.getTok();
3321  SMLoc S = Tok.getLoc();
3322  if (Tok.isNot(AsmToken::Identifier)) {
3323    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3324    return MatchOperand_ParseFail;
3325  }
3326  int Val = StringSwitch<int>(Tok.getString())
3327    .Case("be", 1)
3328    .Case("le", 0)
3329    .Default(-1);
3330  Parser.Lex(); // Eat the token.
3331
3332  if (Val == -1) {
3333    Error(S, "'be' or 'le' operand expected");
3334    return MatchOperand_ParseFail;
3335  }
3336  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3337                                                                  getContext()),
3338                                           S, Parser.getTok().getLoc()));
3339  return MatchOperand_Success;
3340}
3341
3342/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3343/// instructions. Legal values are:
3344///     lsl #n  'n' in [0,31]
3345///     asr #n  'n' in [1,32]
3346///             n == 32 encoded as n == 0.
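/// For example (illustrative): "ssat r0, #8, r1, lsl #4" and
/// "usat r0, #7, r1, asr #2".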
3347ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3348parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3349  const AsmToken &Tok = Parser.getTok();
3350  SMLoc S = Tok.getLoc();
3351  if (Tok.isNot(AsmToken::Identifier)) {
3352    Error(S, "shift operator 'asr' or 'lsl' expected");
3353    return MatchOperand_ParseFail;
3354  }
3355  StringRef ShiftName = Tok.getString();
3356  bool isASR;
3357  if (ShiftName == "lsl" || ShiftName == "LSL")
3358    isASR = false;
3359  else if (ShiftName == "asr" || ShiftName == "ASR")
3360    isASR = true;
3361  else {
3362    Error(S, "shift operator 'asr' or 'lsl' expected");
3363    return MatchOperand_ParseFail;
3364  }
3365  Parser.Lex(); // Eat the operator.
3366
3367  // A '#' and a shift amount.
3368  if (Parser.getTok().isNot(AsmToken::Hash) &&
3369      Parser.getTok().isNot(AsmToken::Dollar)) {
3370    Error(Parser.getTok().getLoc(), "'#' expected");
3371    return MatchOperand_ParseFail;
3372  }
3373  Parser.Lex(); // Eat hash token.
3374
3375  const MCExpr *ShiftAmount;
3376  SMLoc E = Parser.getTok().getLoc();
3377  if (getParser().ParseExpression(ShiftAmount)) {
3378    Error(E, "malformed shift expression");
3379    return MatchOperand_ParseFail;
3380  }
3381  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3382  if (!CE) {
3383    Error(E, "shift amount must be an immediate");
3384    return MatchOperand_ParseFail;
3385  }
3386
3387  int64_t Val = CE->getValue();
3388  if (isASR) {
3389    // Shift amount must be in [1,32]
3390    if (Val < 1 || Val > 32) {
3391      Error(E, "'asr' shift amount must be in range [1,32]");
3392      return MatchOperand_ParseFail;
3393    }
3394    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3395    if (isThumb() && Val == 32) {
3396      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3397      return MatchOperand_ParseFail;
3398    }
3399    if (Val == 32) Val = 0;
3400  } else {
3401    // Shift amount must be in [0,31]
3402    if (Val < 0 || Val > 31) {
3403      Error(E, "'lsl' shift amount must be in range [0,31]");
3404      return MatchOperand_ParseFail;
3405    }
3406  }
3407
3408  E = Parser.getTok().getLoc();
3409  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3410
3411  return MatchOperand_Success;
3412}
3413
3414/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3415/// of instructions. Legal values are:
3416///     ror #n  'n' in {0, 8, 16, 24}
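/// For example (illustrative): "sxtb r0, r1, ror #8" and
/// "uxtah r0, r1, r2, ror #16".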
3417ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3418parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3419  const AsmToken &Tok = Parser.getTok();
3420  SMLoc S = Tok.getLoc();
3421  if (Tok.isNot(AsmToken::Identifier))
3422    return MatchOperand_NoMatch;
3423  StringRef ShiftName = Tok.getString();
3424  if (ShiftName != "ror" && ShiftName != "ROR")
3425    return MatchOperand_NoMatch;
3426  Parser.Lex(); // Eat the operator.
3427
3428  // A '#' and a rotate amount.
3429  if (Parser.getTok().isNot(AsmToken::Hash) &&
3430      Parser.getTok().isNot(AsmToken::Dollar)) {
3431    Error(Parser.getTok().getLoc(), "'#' expected");
3432    return MatchOperand_ParseFail;
3433  }
3434  Parser.Lex(); // Eat hash token.
3435
3436  const MCExpr *ShiftAmount;
3437  SMLoc E = Parser.getTok().getLoc();
3438  if (getParser().ParseExpression(ShiftAmount)) {
3439    Error(E, "malformed rotate expression");
3440    return MatchOperand_ParseFail;
3441  }
3442  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3443  if (!CE) {
3444    Error(E, "rotate amount must be an immediate");
3445    return MatchOperand_ParseFail;
3446  }
3447
3448  int64_t Val = CE->getValue();
3449  // Rotate amount must be in {0, 8, 16, 24}. Accepting 0 is an undocumented
3450  // extension; normally, zero is represented in asm by omitting the rotate
3451  // operand entirely.
3452  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3453    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3454    return MatchOperand_ParseFail;
3455  }
3456
3457  E = Parser.getTok().getLoc();
3458  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3459
3460  return MatchOperand_Success;
3461}
3462
3463ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3464parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3465  SMLoc S = Parser.getTok().getLoc();
3466  // The bitfield descriptor is really two operands, the LSB and the width.
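  // For example (illustrative), "bfi r0, r1, #8, #4" inserts a 4-bit field at
  // bit 8, and "bfc r2, #0, #16" clears the low 16 bits.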
3467  if (Parser.getTok().isNot(AsmToken::Hash) &&
3468      Parser.getTok().isNot(AsmToken::Dollar)) {
3469    Error(Parser.getTok().getLoc(), "'#' expected");
3470    return MatchOperand_ParseFail;
3471  }
3472  Parser.Lex(); // Eat hash token.
3473
3474  const MCExpr *LSBExpr;
3475  SMLoc E = Parser.getTok().getLoc();
3476  if (getParser().ParseExpression(LSBExpr)) {
3477    Error(E, "malformed immediate expression");
3478    return MatchOperand_ParseFail;
3479  }
3480  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3481  if (!CE) {
3482    Error(E, "'lsb' operand must be an immediate");
3483    return MatchOperand_ParseFail;
3484  }
3485
3486  int64_t LSB = CE->getValue();
3487  // The LSB must be in the range [0,31]
3488  if (LSB < 0 || LSB > 31) {
3489    Error(E, "'lsb' operand must be in the range [0,31]");
3490    return MatchOperand_ParseFail;
3491  }
3492  E = Parser.getTok().getLoc();
3493
3494  // Expect another immediate operand.
3495  if (Parser.getTok().isNot(AsmToken::Comma)) {
3496    Error(Parser.getTok().getLoc(), "too few operands");
3497    return MatchOperand_ParseFail;
3498  }
3499  Parser.Lex(); // Eat comma token.
3500  if (Parser.getTok().isNot(AsmToken::Hash) &&
3501      Parser.getTok().isNot(AsmToken::Dollar)) {
3502    Error(Parser.getTok().getLoc(), "'#' expected");
3503    return MatchOperand_ParseFail;
3504  }
3505  Parser.Lex(); // Eat hash token.
3506
3507  const MCExpr *WidthExpr;
3508  if (getParser().ParseExpression(WidthExpr)) {
3509    Error(E, "malformed immediate expression");
3510    return MatchOperand_ParseFail;
3511  }
3512  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3513  if (!CE) {
3514    Error(E, "'width' operand must be an immediate");
3515    return MatchOperand_ParseFail;
3516  }
3517
3518  int64_t Width = CE->getValue();
3519  // The width must be in the range [1,32-lsb]
3520  if (Width < 1 || Width > 32 - LSB) {
3521    Error(E, "'width' operand must be in the range [1,32-lsb]");
3522    return MatchOperand_ParseFail;
3523  }
3524  E = Parser.getTok().getLoc();
3525
3526  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3527
3528  return MatchOperand_Success;
3529}
3530
3531ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3532parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3533  // Check for a post-index addressing register operand. Specifically:
3534  // postidx_reg := '+' register {, shift}
3535  //              | '-' register {, shift}
3536  //              | register {, shift}
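  //
  // For example (illustrative), the trailing "r2, lsl #2" in
  //   ldr r0, [r1], r2, lsl #2
  // is parsed here as a post-index register operand.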
3537
3538  // This method must return MatchOperand_NoMatch without consuming any tokens
3539  // in the case where there is no match, as other alternatives take other
3540  // parse methods.
3541  AsmToken Tok = Parser.getTok();
3542  SMLoc S = Tok.getLoc();
3543  bool haveEaten = false;
3544  bool isAdd = true;
3545  int Reg = -1;
3546  if (Tok.is(AsmToken::Plus)) {
3547    Parser.Lex(); // Eat the '+' token.
3548    haveEaten = true;
3549  } else if (Tok.is(AsmToken::Minus)) {
3550    Parser.Lex(); // Eat the '-' token.
3551    isAdd = false;
3552    haveEaten = true;
3553  }
3554  if (Parser.getTok().is(AsmToken::Identifier))
3555    Reg = tryParseRegister();
3556  if (Reg == -1) {
3557    if (!haveEaten)
3558      return MatchOperand_NoMatch;
3559    Error(Parser.getTok().getLoc(), "register expected");
3560    return MatchOperand_ParseFail;
3561  }
3562  SMLoc E = Parser.getTok().getLoc();
3563
3564  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3565  unsigned ShiftImm = 0;
3566  if (Parser.getTok().is(AsmToken::Comma)) {
3567    Parser.Lex(); // Eat the ','.
3568    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3569      return MatchOperand_ParseFail;
3570  }
3571
3572  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3573                                                  ShiftImm, S, E));
3574
3575  return MatchOperand_Success;
3576}
3577
3578ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3579parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3580  // Check for a post-index addressing register operand. Specifically:
3581  // am3offset := '+' register
3582  //              | '-' register
3583  //              | register
3584  //              | # imm
3585  //              | # + imm
3586  //              | # - imm
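  //
  // For example (illustrative): "ldrh r0, [r1], #2", "ldrd r0, r1, [r2], #-8",
  // and "ldrsb r0, [r1], -r2" all use an am3offset after the bracketed base.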
3587
3588  // This method must return MatchOperand_NoMatch without consuming any tokens
3589  // in the case where there is no match, as other alternatives take other
3590  // parse methods.
3591  AsmToken Tok = Parser.getTok();
3592  SMLoc S = Tok.getLoc();
3593
3594  // Do immediates first, as we always parse those if we have a '#'.
3595  if (Parser.getTok().is(AsmToken::Hash) ||
3596      Parser.getTok().is(AsmToken::Dollar)) {
3597    Parser.Lex(); // Eat the '#'.
3598    // Explicitly look for a '-', as we need to encode negative zero
3599    // differently.
3600    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3601    const MCExpr *Offset;
3602    if (getParser().ParseExpression(Offset))
3603      return MatchOperand_ParseFail;
3604    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3605    if (!CE) {
3606      Error(S, "constant expression expected");
3607      return MatchOperand_ParseFail;
3608    }
3609    SMLoc E = Tok.getLoc();
3610    // Negative zero is encoded as the flag value INT32_MIN.
3611    int32_t Val = CE->getValue();
3612    if (isNegative && Val == 0)
3613      Val = INT32_MIN;
3614
3615    Operands.push_back(
3616      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3617
3618    return MatchOperand_Success;
3619  }
3620
3621
3622  bool haveEaten = false;
3623  bool isAdd = true;
3624  int Reg = -1;
3625  if (Tok.is(AsmToken::Plus)) {
3626    Parser.Lex(); // Eat the '+' token.
3627    haveEaten = true;
3628  } else if (Tok.is(AsmToken::Minus)) {
3629    Parser.Lex(); // Eat the '-' token.
3630    isAdd = false;
3631    haveEaten = true;
3632  }
3633  if (Parser.getTok().is(AsmToken::Identifier))
3634    Reg = tryParseRegister();
3635  if (Reg == -1) {
3636    if (!haveEaten)
3637      return MatchOperand_NoMatch;
3638    Error(Parser.getTok().getLoc(), "register expected");
3639    return MatchOperand_ParseFail;
3640  }
3641  SMLoc E = Parser.getTok().getLoc();
3642
3643  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3644                                                  0, S, E));
3645
3646  return MatchOperand_Success;
3647}
3648
3649/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3650/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3651/// when they refer to multiple MIOperands inside a single one.
3652bool ARMAsmParser::
3653cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3654             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3655  // Rt, Rt2
3656  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3657  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3658  // Create a writeback register dummy placeholder.
3659  Inst.addOperand(MCOperand::CreateReg(0));
3660  // addr
3661  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3662  // pred
3663  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3664  return true;
3665}
3666
3667/// cvtT2StrdPre - Convert parsed operands to MCInst.
3668/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3669/// when they refer to multiple MIOperands inside a single one.
3670bool ARMAsmParser::
3671cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3672             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3673  // Create a writeback register dummy placeholder.
3674  Inst.addOperand(MCOperand::CreateReg(0));
3675  // Rt, Rt2
3676  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3677  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3678  // addr
3679  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3680  // pred
3681  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3682  return true;
3683}
3684
3685/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3686/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3687/// when they refer to multiple MIOperands inside a single one.
3688bool ARMAsmParser::
3689cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3690                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3691  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3692
3693  // Create a writeback register dummy placeholder.
3694  Inst.addOperand(MCOperand::CreateImm(0));
3695
3696  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3697  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3698  return true;
3699}
3700
3701/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3702/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3703/// when they refer to multiple MIOperands inside a single one.
3704bool ARMAsmParser::
3705cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3706                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3707  // Create a writeback register dummy placeholder.
3708  Inst.addOperand(MCOperand::CreateImm(0));
3709  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3710  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3711  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3712  return true;
3713}
3714
3715/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3716/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3717/// when they refer to multiple MIOperands inside a single one.
3718bool ARMAsmParser::
3719cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3720                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3721  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3722
3723  // Create a writeback register dummy placeholder.
3724  Inst.addOperand(MCOperand::CreateImm(0));
3725
3726  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3727  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3728  return true;
3729}
3730
3731/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3732/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3733/// when they refer to multiple MIOperands inside a single one.
3734bool ARMAsmParser::
3735cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3736                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3737  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3738
3739  // Create a writeback register dummy placeholder.
3740  Inst.addOperand(MCOperand::CreateImm(0));
3741
3742  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3743  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3744  return true;
3745}
3746
3747
3748/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3749/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3750/// when they refer to multiple MIOperands inside a single one.
3751bool ARMAsmParser::
3752cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3753                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3754  // Create a writeback register dummy placeholder.
3755  Inst.addOperand(MCOperand::CreateImm(0));
3756  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3757  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3758  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3759  return true;
3760}
3761
3762/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3763/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3764/// when they refer to multiple MIOperands inside a single one.
3765bool ARMAsmParser::
3766cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3767                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3768  // Create a writeback register dummy placeholder.
3769  Inst.addOperand(MCOperand::CreateImm(0));
3770  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3771  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3772  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3773  return true;
3774}
3775
3776/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3777/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3778/// when they refer to multiple MIOperands inside a single one.
3779bool ARMAsmParser::
3780cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3781                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3782  // Create a writeback register dummy placeholder.
3783  Inst.addOperand(MCOperand::CreateImm(0));
3784  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3785  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3786  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3787  return true;
3788}
3789
3790/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3791/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3792/// when they refer to multiple MIOperands inside a single one.
3793bool ARMAsmParser::
3794cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3795                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3796  // Rt
3797  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3798  // Create a writeback register dummy placeholder.
3799  Inst.addOperand(MCOperand::CreateImm(0));
3800  // addr
3801  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3802  // offset
3803  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3804  // pred
3805  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3806  return true;
3807}
3808
3809/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3810/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3811/// when they refer to multiple MIOperands inside a single one.
3812bool ARMAsmParser::
3813cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3814                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3815  // Rt
3816  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3817  // Create a writeback register dummy placeholder.
3818  Inst.addOperand(MCOperand::CreateImm(0));
3819  // addr
3820  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3821  // offset
3822  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3823  // pred
3824  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3825  return true;
3826}
3827
3828/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3829/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3830/// when they refer to multiple MIOperands inside a single one.
3831bool ARMAsmParser::
3832cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3833                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3834  // Create a writeback register dummy placeholder.
3835  Inst.addOperand(MCOperand::CreateImm(0));
3836  // Rt
3837  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3838  // addr
3839  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3840  // offset
3841  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3842  // pred
3843  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3844  return true;
3845}
3846
3847/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3848/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3849/// when they refer to multiple MIOperands inside a single one.
3850bool ARMAsmParser::
3851cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3852                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3853  // Create a writeback register dummy placeholder.
3854  Inst.addOperand(MCOperand::CreateImm(0));
3855  // Rt
3856  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3857  // addr
3858  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3859  // offset
3860  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3861  // pred
3862  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3863  return true;
3864}
3865
3866/// cvtLdrdPre - Convert parsed operands to MCInst.
3867/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3868/// when they refer to multiple MIOperands inside a single one.
3869bool ARMAsmParser::
3870cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3871           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3872  // Rt, Rt2
3873  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3874  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3875  // Create a writeback register dummy placeholder.
3876  Inst.addOperand(MCOperand::CreateImm(0));
3877  // addr
3878  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3879  // pred
3880  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3881  return true;
3882}
3883
3884/// cvtStrdPre - Convert parsed operands to MCInst.
3885/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3886/// when they refer to multiple MIOperands inside a single one.
3887bool ARMAsmParser::
3888cvtStrdPre(MCInst &Inst, unsigned Opcode,
3889           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3890  // Create a writeback register dummy placeholder.
3891  Inst.addOperand(MCOperand::CreateImm(0));
3892  // Rt, Rt2
3893  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3894  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3895  // addr
3896  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3897  // pred
3898  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3899  return true;
3900}
3901
3902/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3903/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3904/// when they refer to multiple MIOperands inside a single one.
3905bool ARMAsmParser::
3906cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3907                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3908  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3909  // Create a writeback register dummy placeholder.
3910  Inst.addOperand(MCOperand::CreateImm(0));
3911  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3912  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3913  return true;
3914}
3915
3916/// cvtThumbMultiply - Convert parsed operands to MCInst.
3917/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3918/// when they refer to multiple MIOperands inside a single one.
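/// For example (illustrative), the 16-bit Thumb form accepts
/// "muls r0, r1, r0" (the destination matches a source register), whereas
/// "muls r0, r1, r2" cannot be expressed that way.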
3919bool ARMAsmParser::
3920cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3921           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3922  // The second source operand must be the same register as the destination
3923  // operand.
3924  if (Operands.size() == 6 &&
3925      (((ARMOperand*)Operands[3])->getReg() !=
3926       ((ARMOperand*)Operands[5])->getReg()) &&
3927      (((ARMOperand*)Operands[3])->getReg() !=
3928       ((ARMOperand*)Operands[4])->getReg())) {
3929    Error(Operands[3]->getStartLoc(),
3930          "destination register must match source register");
3931    return false;
3932  }
3933  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3934  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3935  // If we have a three-operand form, make sure to set Rn to be the operand
3936  // that isn't the same as Rd.
3937  unsigned RegOp = 4;
3938  if (Operands.size() == 6 &&
3939      ((ARMOperand*)Operands[4])->getReg() ==
3940        ((ARMOperand*)Operands[3])->getReg())
3941    RegOp = 5;
3942  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3943  Inst.addOperand(Inst.getOperand(0));
3944  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3945
3946  return true;
3947}
3948
3949bool ARMAsmParser::
3950cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3951              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3952  // Vd
3953  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3954  // Create a writeback register dummy placeholder.
3955  Inst.addOperand(MCOperand::CreateImm(0));
3956  // Vn
3957  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3958  // pred
3959  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3960  return true;
3961}
3962
3963bool ARMAsmParser::
3964cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3965                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3966  // Vd
3967  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3968  // Create a writeback register dummy placeholder.
3969  Inst.addOperand(MCOperand::CreateImm(0));
3970  // Vn
3971  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3972  // Vm
3973  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3974  // pred
3975  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3976  return true;
3977}
3978
3979bool ARMAsmParser::
3980cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3981              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3982  // Create a writeback register dummy placeholder.
3983  Inst.addOperand(MCOperand::CreateImm(0));
3984  // Vn
3985  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3986  // Vt
3987  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3988  // pred
3989  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3990  return true;
3991}
3992
3993bool ARMAsmParser::
3994cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3995                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3996  // Create a writeback register dummy placeholder.
3997  Inst.addOperand(MCOperand::CreateImm(0));
3998  // Vn
3999  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4000  // Vm
4001  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4002  // Vt
4003  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4004  // pred
4005  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4006  return true;
4007}
4008
4009/// Parse an ARM memory expression; return false if successful, otherwise return
4010/// true (an error has been emitted).  The first token must be a '[' when called.
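/// For example (illustrative): "[r0]", "[r0, #-4]", "[r0, r1, lsl #2]",
/// "[r0, :128]", and the same forms followed by a '!' writeback marker.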
4011bool ARMAsmParser::
4012parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4013  SMLoc S, E;
4014  assert(Parser.getTok().is(AsmToken::LBrac) &&
4015         "Token is not a Left Bracket");
4016  S = Parser.getTok().getLoc();
4017  Parser.Lex(); // Eat left bracket token.
4018
4019  const AsmToken &BaseRegTok = Parser.getTok();
4020  int BaseRegNum = tryParseRegister();
4021  if (BaseRegNum == -1)
4022    return Error(BaseRegTok.getLoc(), "register expected");
4023
4024  // The next token must either be a comma or a closing bracket.
4025  const AsmToken &Tok = Parser.getTok();
4026  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4027    return Error(Tok.getLoc(), "malformed memory operand");
4028
4029  if (Tok.is(AsmToken::RBrac)) {
4030    E = Tok.getLoc();
4031    Parser.Lex(); // Eat right bracket token.
4032
4033    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4034                                             0, 0, false, S, E));
4035
4036    // If there's a pre-indexing writeback marker, '!', just add it as a token
4037    // operand. It's rather odd, but syntactically valid.
4038    if (Parser.getTok().is(AsmToken::Exclaim)) {
4039      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4040      Parser.Lex(); // Eat the '!'.
4041    }
4042
4043    return false;
4044  }
4045
4046  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4047  Parser.Lex(); // Eat the comma.
4048
4049  // If we have a ':', it's an alignment specifier.
4050  if (Parser.getTok().is(AsmToken::Colon)) {
4051    Parser.Lex(); // Eat the ':'.
4052    E = Parser.getTok().getLoc();
4053
4054    const MCExpr *Expr;
4055    if (getParser().ParseExpression(Expr))
4056     return true;
4057
4058    // The expression has to be a constant. Memory references with relocations
4059    // don't come through here, as they use the <label> forms of the relevant
4060    // instructions.
4061    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4062    if (!CE)
4063      return Error (E, "constant expression expected");
4064
4065    unsigned Align = 0;
4066    switch (CE->getValue()) {
4067    default:
4068      return Error(E,
4069                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4070    case 16:  Align = 2; break;
4071    case 32:  Align = 4; break;
4072    case 64:  Align = 8; break;
4073    case 128: Align = 16; break;
4074    case 256: Align = 32; break;
4075    }
4076
4077    // Now we should have the closing ']'
4078    E = Parser.getTok().getLoc();
4079    if (Parser.getTok().isNot(AsmToken::RBrac))
4080      return Error(E, "']' expected");
4081    Parser.Lex(); // Eat right bracket token.
4082
4083    // Don't worry about range checking the value here. That's handled by
4084    // the is*() predicates.
4085    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4086                                             ARM_AM::no_shift, 0, Align,
4087                                             false, S, E));
4088
4089    // If there's a pre-indexing writeback marker, '!', just add it as a token
4090    // operand.
4091    if (Parser.getTok().is(AsmToken::Exclaim)) {
4092      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4093      Parser.Lex(); // Eat the '!'.
4094    }
4095
4096    return false;
4097  }
4098
4099  // If we have a '#', it's an immediate offset, else assume it's a register
4100  // offset. Be friendly and also accept a plain integer (without a leading
4101  // hash) for gas compatibility.
4102  if (Parser.getTok().is(AsmToken::Hash) ||
4103      Parser.getTok().is(AsmToken::Dollar) ||
4104      Parser.getTok().is(AsmToken::Integer)) {
4105    if (Parser.getTok().isNot(AsmToken::Integer))
4106      Parser.Lex(); // Eat the '#'.
4107    E = Parser.getTok().getLoc();
4108
4109    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4110    const MCExpr *Offset;
4111    if (getParser().ParseExpression(Offset))
4112     return true;
4113
4114    // The expression has to be a constant. Memory references with relocations
4115    // don't come through here, as they use the <label> forms of the relevant
4116    // instructions.
4117    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4118    if (!CE)
4119      return Error (E, "constant expression expected");
4120
4121    // If the constant was #-0, represent it as INT32_MIN.
4122    int32_t Val = CE->getValue();
4123    if (isNegative && Val == 0)
4124      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4125
4126    // Now we should have the closing ']'
4127    E = Parser.getTok().getLoc();
4128    if (Parser.getTok().isNot(AsmToken::RBrac))
4129      return Error(E, "']' expected");
4130    Parser.Lex(); // Eat right bracket token.
4131
4132    // Don't worry about range checking the value here. That's handled by
4133    // the is*() predicates.
4134    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4135                                             ARM_AM::no_shift, 0, 0,
4136                                             false, S, E));
4137
4138    // If there's a pre-indexing writeback marker, '!', just add it as a token
4139    // operand.
4140    if (Parser.getTok().is(AsmToken::Exclaim)) {
4141      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4142      Parser.Lex(); // Eat the '!'.
4143    }
4144
4145    return false;
4146  }
4147
4148  // The register offset is optionally preceded by a '+' or '-'
4149  bool isNegative = false;
4150  if (Parser.getTok().is(AsmToken::Minus)) {
4151    isNegative = true;
4152    Parser.Lex(); // Eat the '-'.
4153  } else if (Parser.getTok().is(AsmToken::Plus)) {
4154    // Nothing to do.
4155    Parser.Lex(); // Eat the '+'.
4156  }
4157
4158  E = Parser.getTok().getLoc();
4159  int OffsetRegNum = tryParseRegister();
4160  if (OffsetRegNum == -1)
4161    return Error(E, "register expected");
4162
4163  // If there's a shift operator, handle it.
4164  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4165  unsigned ShiftImm = 0;
4166  if (Parser.getTok().is(AsmToken::Comma)) {
4167    Parser.Lex(); // Eat the ','.
4168    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4169      return true;
4170  }
4171
4172  // Now we should have the closing ']'
4173  E = Parser.getTok().getLoc();
4174  if (Parser.getTok().isNot(AsmToken::RBrac))
4175    return Error(E, "']' expected");
4176  Parser.Lex(); // Eat right bracket token.
4177
4178  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4179                                           ShiftType, ShiftImm, 0, isNegative,
4180                                           S, E));
4181
4182  // If there's a pre-indexing writeback marker, '!', just add it as a token
4183  // operand.
4184  if (Parser.getTok().is(AsmToken::Exclaim)) {
4185    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4186    Parser.Lex(); // Eat the '!'.
4187  }
4188
4189  return false;
4190}
4191
4192/// parseMemRegOffsetShift - one of these two:
4193///   ( lsl | lsr | asr | ror ) , # shift_amount
4194///   rrx
4195/// Returns false if it successfully parses the shift, true on error.
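/// For example (illustrative), this parses the "lsl #2" in
/// "ldr r0, [r1, r2, lsl #2]" and the "rrx" in "ldr r0, [r1, r2, rrx]".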
4196bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4197                                          unsigned &Amount) {
4198  SMLoc Loc = Parser.getTok().getLoc();
4199  const AsmToken &Tok = Parser.getTok();
4200  if (Tok.isNot(AsmToken::Identifier))
4201    return true;
4202  StringRef ShiftName = Tok.getString();
4203  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4204      ShiftName == "asl" || ShiftName == "ASL")
4205    St = ARM_AM::lsl;
4206  else if (ShiftName == "lsr" || ShiftName == "LSR")
4207    St = ARM_AM::lsr;
4208  else if (ShiftName == "asr" || ShiftName == "ASR")
4209    St = ARM_AM::asr;
4210  else if (ShiftName == "ror" || ShiftName == "ROR")
4211    St = ARM_AM::ror;
4212  else if (ShiftName == "rrx" || ShiftName == "RRX")
4213    St = ARM_AM::rrx;
4214  else
4215    return Error(Loc, "illegal shift operator");
4216  Parser.Lex(); // Eat shift type token.
4217
4218  // rrx stands alone.
4219  Amount = 0;
4220  if (St != ARM_AM::rrx) {
4221    Loc = Parser.getTok().getLoc();
4222    // A '#' and a shift amount.
4223    const AsmToken &HashTok = Parser.getTok();
4224    if (HashTok.isNot(AsmToken::Hash) &&
4225        HashTok.isNot(AsmToken::Dollar))
4226      return Error(HashTok.getLoc(), "'#' expected");
4227    Parser.Lex(); // Eat hash token.
4228
4229    const MCExpr *Expr;
4230    if (getParser().ParseExpression(Expr))
4231      return true;
4232    // Range check the immediate.
4233    // lsl, ror: 0 <= imm <= 31
4234    // lsr, asr: 0 <= imm <= 32
4235    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4236    if (!CE)
4237      return Error(Loc, "shift amount must be an immediate");
4238    int64_t Imm = CE->getValue();
4239    if (Imm < 0 ||
4240        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4241        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4242      return Error(Loc, "immediate shift value out of range");
4243    Amount = Imm;
4244  }
4245
4246  return false;
4247}
4248
4249/// parseFPImm - A floating point immediate expression operand.
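/// For example (illustrative): "vmov.f32 s0, #0.5" and "vmov.f64 d0, #-1.0".
/// An already-encoded integer value in [0,255] is also accepted in place of
/// the real literal.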
4250ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4251parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4252  SMLoc S = Parser.getTok().getLoc();
4253
4254  if (Parser.getTok().isNot(AsmToken::Hash) &&
4255      Parser.getTok().isNot(AsmToken::Dollar))
4256    return MatchOperand_NoMatch;
4257
4258  // Disambiguate the VMOV forms that can accept an FP immediate.
4259  // vmov.f32 <sreg>, #imm
4260  // vmov.f64 <dreg>, #imm
4261  // vmov.f32 <dreg>, #imm  @ vector f32x2
4262  // vmov.f32 <qreg>, #imm  @ vector f32x4
4263  //
4264  // There are also the NEON VMOV instructions which expect an
4265  // integer constant. Make sure we don't try to parse an FPImm
4266  // for these:
4267  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4268  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4269  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4270                           TyOp->getToken() != ".f64"))
4271    return MatchOperand_NoMatch;
4272
4273  Parser.Lex(); // Eat the '#'.
4274
4275  // Handle negation, as that still comes through as a separate token.
4276  bool isNegative = false;
4277  if (Parser.getTok().is(AsmToken::Minus)) {
4278    isNegative = true;
4279    Parser.Lex();
4280  }
4281  const AsmToken &Tok = Parser.getTok();
4282  if (Tok.is(AsmToken::Real)) {
4283    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4284    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4285    // If we had a '-' in front, toggle the sign bit.
4286    IntVal ^= (uint64_t)isNegative << 63;
4287    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4288    Parser.Lex(); // Eat the token.
4289    if (Val == -1) {
4290      TokError("floating point value out of range");
4291      return MatchOperand_ParseFail;
4292    }
4293    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4294    return MatchOperand_Success;
4295  }
4296  if (Tok.is(AsmToken::Integer)) {
4297    int64_t Val = Tok.getIntVal();
4298    Parser.Lex(); // Eat the token.
4299    if (Val > 255 || Val < 0) {
4300      TokError("encoded floating point value out of range");
4301      return MatchOperand_ParseFail;
4302    }
4303    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4304    return MatchOperand_Success;
4305  }
4306
4307  TokError("invalid floating point immediate");
4308  return MatchOperand_ParseFail;
4309}
4310/// Parse an ARM instruction operand.  For now this parses the operand regardless
4311/// of the mnemonic.
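/// For example (illustrative), operands handled here include immediates like
/// "#42", label/expression operands, memory operands like "[r0, #4]", register
/// lists like "{r0-r3}", and ":lower16:"/":upper16:" prefixed expressions.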
4312bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4313                                StringRef Mnemonic) {
4314  SMLoc S, E;
4315
4316  // Check if the current operand has a custom associated parser, if so, try to
4317  // custom parse the operand, or fallback to the general approach.
4318  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4319  if (ResTy == MatchOperand_Success)
4320    return false;
4321  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4322  // there was a match, but an error occurred, in which case, just return that
4323  // the operand parsing failed.
4324  if (ResTy == MatchOperand_ParseFail)
4325    return true;
4326
4327  switch (getLexer().getKind()) {
4328  default:
4329    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4330    return true;
4331  case AsmToken::Identifier: {
4332    if (!tryParseRegisterWithWriteBack(Operands))
4333      return false;
4334    int Res = tryParseShiftRegister(Operands);
4335    if (Res == 0) // success
4336      return false;
4337    else if (Res == -1) // irrecoverable error
4338      return true;
4339    // If this is VMRS, check for the apsr_nzcv operand.
4340    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4341      S = Parser.getTok().getLoc();
4342      Parser.Lex();
4343      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4344      return false;
4345    }
4346
4347    // Fall through for the Identifier case that is not a register or a
4348    // special name.
4349  }
4350  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4351  case AsmToken::Integer: // things like 1f and 2b as a branch targets
4352  case AsmToken::String:  // quoted label names.
4353  case AsmToken::Dot: {   // . as a branch target
4354    // This was not a register so parse other operands that start with an
4355    // identifier (like labels) as expressions and create them as immediates.
4356    const MCExpr *IdVal;
4357    S = Parser.getTok().getLoc();
4358    if (getParser().ParseExpression(IdVal))
4359      return true;
4360    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4361    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4362    return false;
4363  }
4364  case AsmToken::LBrac:
4365    return parseMemory(Operands);
4366  case AsmToken::LCurly:
4367    return parseRegisterList(Operands);
4368  case AsmToken::Dollar:
4369  case AsmToken::Hash: {
4370    // #42 -> immediate.
4371    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4372    S = Parser.getTok().getLoc();
4373    Parser.Lex();
4374    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4375    const MCExpr *ImmVal;
4376    if (getParser().ParseExpression(ImmVal))
4377      return true;
4378    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
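    // Note (an interpretation of the check below): once the expression is
    // folded, "#-0" would be indistinguishable from "#0", so INT32_MIN is
    // used as a sentinel to remember the explicit minus sign, e.g. where a
    // negative-zero offset selects a different encoding than a positive one.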
4379    if (CE) {
4380      int32_t Val = CE->getValue();
4381      if (isNegative && Val == 0)
4382        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4383    }
4384    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4385    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4386    return false;
4387  }
4388  case AsmToken::Colon: {
4389    // ":lower16:" and ":upper16:" expression prefixes
4390    // FIXME: Check it's an expression prefix,
4391    // e.g. (FOO - :lower16:BAR) isn't legal.
4392    ARMMCExpr::VariantKind RefKind;
4393    if (parsePrefix(RefKind))
4394      return true;
4395
4396    const MCExpr *SubExprVal;
4397    if (getParser().ParseExpression(SubExprVal))
4398      return true;
4399
4400    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4401                                                   getContext());
4402    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4403    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4404    return false;
4405  }
4406  }
4407}
4408
4409// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4410//  :lower16: and :upper16:.
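//  For illustration (not an exhaustive list of uses), this is what lets
//  "movw r0, :lower16:sym" and "movt r0, :upper16:sym" parse.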
4411bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4412  RefKind = ARMMCExpr::VK_ARM_None;
4413
4414  // :lower16: and :upper16: modifiers
4415  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4416  Parser.Lex(); // Eat ':'
4417
4418  if (getLexer().isNot(AsmToken::Identifier)) {
4419    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4420    return true;
4421  }
4422
4423  StringRef IDVal = Parser.getTok().getIdentifier();
4424  if (IDVal == "lower16") {
4425    RefKind = ARMMCExpr::VK_ARM_LO16;
4426  } else if (IDVal == "upper16") {
4427    RefKind = ARMMCExpr::VK_ARM_HI16;
4428  } else {
4429    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4430    return true;
4431  }
4432  Parser.Lex();
4433
4434  if (getLexer().isNot(AsmToken::Colon)) {
4435    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4436    return true;
4437  }
4438  Parser.Lex(); // Eat the last ':'
4439  return false;
4440}
4441
4442/// \brief Given a mnemonic, split out possible predication code and carry
4443/// setting letters to form a canonical mnemonic and flags.
4444//
4445// FIXME: Would be nice to autogen this.
4446// FIXME: This is a bit of a maze of special cases.
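//
// A few hand-traced examples of the splitting done below (illustrative
// only, worked from the logic in this function):
//   "addseq" -> Mnemonic "add", PredicationCode EQ, CarrySetting true
//   "cpsie"  -> Mnemonic "cps", ProcessorIMod IE
//   "ittet"  -> Mnemonic "it",  ITMask "tet"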
4447StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4448                                      unsigned &PredicationCode,
4449                                      bool &CarrySetting,
4450                                      unsigned &ProcessorIMod,
4451                                      StringRef &ITMask) {
4452  PredicationCode = ARMCC::AL;
4453  CarrySetting = false;
4454  ProcessorIMod = 0;
4455
4456  // Ignore some mnemonics we know aren't predicated forms.
4457  //
4458  // FIXME: Would be nice to autogen this.
4459  if ((Mnemonic == "movs" && isThumb()) ||
4460      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4461      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4462      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4463      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4464      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4465      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4466      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4467      Mnemonic == "fmuls")
4468    return Mnemonic;
4469
4470  // First, split out any predication code. Ignore mnemonics we know aren't
4471  // predicated but do have a carry-set and so weren't caught above.
4472  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4473      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4474      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4475      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4476    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4477      .Case("eq", ARMCC::EQ)
4478      .Case("ne", ARMCC::NE)
4479      .Case("hs", ARMCC::HS)
4480      .Case("cs", ARMCC::HS)
4481      .Case("lo", ARMCC::LO)
4482      .Case("cc", ARMCC::LO)
4483      .Case("mi", ARMCC::MI)
4484      .Case("pl", ARMCC::PL)
4485      .Case("vs", ARMCC::VS)
4486      .Case("vc", ARMCC::VC)
4487      .Case("hi", ARMCC::HI)
4488      .Case("ls", ARMCC::LS)
4489      .Case("ge", ARMCC::GE)
4490      .Case("lt", ARMCC::LT)
4491      .Case("gt", ARMCC::GT)
4492      .Case("le", ARMCC::LE)
4493      .Case("al", ARMCC::AL)
4494      .Default(~0U);
4495    if (CC != ~0U) {
4496      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4497      PredicationCode = CC;
4498    }
4499  }
4500
4501  // Next, determine if we have a carry setting bit. We explicitly ignore all
4502  // the instructions we know end in 's'.
4503  if (Mnemonic.endswith("s") &&
4504      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4505        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4506        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4507        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4508        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4509        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4510        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4511        Mnemonic == "fmuls" ||
4512        (Mnemonic == "movs" && isThumb()))) {
4513    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4514    CarrySetting = true;
4515  }
4516
4517  // The "cps" instruction can have a interrupt mode operand which is glued into
4518  // the mnemonic. Check if this is the case, split it and parse the imod op
4519  if (Mnemonic.startswith("cps")) {
4520    // Split out any imod code.
4521    unsigned IMod =
4522      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4523      .Case("ie", ARM_PROC::IE)
4524      .Case("id", ARM_PROC::ID)
4525      .Default(~0U);
4526    if (IMod != ~0U) {
4527      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4528      ProcessorIMod = IMod;
4529    }
4530  }
4531
4532  // The "it" instruction has the condition mask on the end of the mnemonic.
4533  if (Mnemonic.startswith("it")) {
4534    ITMask = Mnemonic.slice(2, Mnemonic.size());
4535    Mnemonic = Mnemonic.slice(0, 2);
4536  }
4537
4538  return Mnemonic;
4539}
4540
4541/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4542/// inclusion of carry set or predication code operands.
4543//
4544// FIXME: It would be nice to autogen this.
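// For example, per the lists below, "add" can accept a carry-setting 's'
// suffix while "cbz" can never accept a predication code at all.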
4545void ARMAsmParser::
4546getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4547                      bool &CanAcceptPredicationCode) {
4548  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4549      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4550      Mnemonic == "add" || Mnemonic == "adc" ||
4551      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4552      Mnemonic == "orr" || Mnemonic == "mvn" ||
4553      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4554      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4555      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4556                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4557                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4558    CanAcceptCarrySet = true;
4559  } else
4560    CanAcceptCarrySet = false;
4561
4562  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4563      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4564      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4565      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4566      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4567      (Mnemonic == "clrex" && !isThumb()) ||
4568      (Mnemonic == "nop" && isThumbOne()) ||
4569      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4570        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4571        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4572      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4573       !isThumb()) ||
4574      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4575    CanAcceptPredicationCode = false;
4576  } else
4577    CanAcceptPredicationCode = true;
4578
4579  if (isThumb()) {
4580    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4581        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4582      CanAcceptPredicationCode = false;
4583  }
4584}
4585
4586bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4587                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4588  // FIXME: This is all horribly hacky. We really need a better way to deal
4589  // with optional operands like this in the matcher table.
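  //
  // For reference, for the full-length mnemonics checked here ("mov", "add",
  // "sub", "mul" with no width suffix), ParseInstruction() lays the list out
  // as: Operands[0] the mnemonic token, Operands[1] the cc_out register (0
  // when no 's' was written), Operands[2] the condition code, and
  // Operands[3..] the operands as written.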
4590
4591  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4592  // another does not. Specifically, the MOVW instruction does not. So we
4593  // special case it here and remove the defaulted (non-setting) cc_out
4594  // operand if that's the instruction we're trying to match.
4595  //
4596  // We do this as post-processing of the explicit operands rather than just
4597  // conditionally adding the cc_out in the first place because we need
4598  // to check the type of the parsed immediate operand.
4599  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4600      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4601      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4602      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4603    return true;
4604
4605  // Register-register 'add' for thumb does not have a cc_out operand
4606  // when there are only two register operands.
4607  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4608      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4609      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4610      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4611    return true;
4612  // Register-register 'add' for thumb does not have a cc_out operand
4613  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4614  // have to check the immediate range here since Thumb2 has a variant
4615  // that can handle a different range and has a cc_out operand.
4616  if (((isThumb() && Mnemonic == "add") ||
4617       (isThumbTwo() && Mnemonic == "sub")) &&
4618      Operands.size() == 6 &&
4619      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4620      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4621      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4622      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4623      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4624       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4625    return true;
4626  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4627  // imm0_4095 variant. That's the least-preferred variant when
4628  // selecting via the generic "add" mnemonic, so to know that we
4629  // should remove the cc_out operand, we have to explicitly check that
4630  // it's not one of the other variants. Ugh.
4631  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4632      Operands.size() == 6 &&
4633      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4634      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4635      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4636    // Nest conditions rather than one big 'if' statement for readability.
4637    //
4638    // If either register is a high reg, it's either one of the SP
4639    // variants (handled above) or a 32-bit encoding, so we just
4640    // check against T3.
4641    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4642         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4643        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4644      return false;
4645    // If both registers are low, we're in an IT block, and the immediate is
4646    // in range, we should use encoding T1 instead, which has a cc_out.
4647    if (inITBlock() &&
4648        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4649        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4650        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4651      return false;
4652
4653    // Otherwise, we use encoding T4, which does not have a cc_out
4654    // operand.
4655    return true;
4656  }
4657
4658  // The thumb2 multiply instruction doesn't have a CCOut register, so
4659  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4660  // use the 16-bit encoding or not.
4661  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4662      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4663      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4664      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4665      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4666      // If the registers aren't low regs, the destination reg isn't the
4667      // same as one of the source regs, or the cc_out operand is zero
4668      // outside of an IT block, we have to use the 32-bit encoding, so
4669      // remove the cc_out operand.
4670      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4671       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4672       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4673       !inITBlock() ||
4674       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4675        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4676        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4677        static_cast<ARMOperand*>(Operands[4])->getReg())))
4678    return true;
4679
4680  // Also check the 'mul' syntax variant that doesn't specify an explicit
4681  // destination register.
4682  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4683      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4684      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4685      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4686      // If the registers aren't low regs  or the cc_out operand is zero
4687      // outside of an IT block, we have to use the 32-bit encoding, so
4688      // remove the cc_out operand.
4689      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4690       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4691       !inITBlock()))
4692    return true;
4693
4694
4695
4696  // Register-register 'add/sub' for thumb does not have a cc_out operand
4697  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4698  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4699  // right, this will result in better diagnostics (which operand is off)
4700  // anyway.
4701  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4702      (Operands.size() == 5 || Operands.size() == 6) &&
4703      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4704      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4705      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4706    return true;
4707
4708  return false;
4709}
4710
4711static bool isDataTypeToken(StringRef Tok) {
4712  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4713    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4714    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4715    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4716    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4717    Tok == ".f" || Tok == ".d";
4718}
4719
4720// FIXME: This bit should probably be handled via an explicit match class
4721// in the .td files that matches the suffix instead of having it be
4722// a literal string token the way it is now.
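// As an illustrative example (assuming the usual NEON syntax), this lets
// "vldmia.f64 r0!, {d0-d3}" parse the same way as "vldmia r0!, {d0-d3}".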
4723static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4724  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4725}
4726
4727static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4728/// Parse an ARM instruction mnemonic followed by its operands.
4729bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4730                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4731  // Apply mnemonic aliases before doing anything else, as the destination
4732  // mnemonic may include suffixes and we want to handle them normally.
4733  // The generic tblgen'erated code does this later, at the start of
4734  // MatchInstructionImpl(), but that's too late for aliases that include
4735  // any sort of suffix.
4736  unsigned AvailableFeatures = getAvailableFeatures();
4737  applyMnemonicAliases(Name, AvailableFeatures);
4738
4739  // First check for the ARM-specific .req directive.
4740  if (Parser.getTok().is(AsmToken::Identifier) &&
4741      Parser.getTok().getIdentifier() == ".req") {
4742    parseDirectiveReq(Name, NameLoc);
4743    // We always return 'error' for this, as we're done with this
4744    // statement and don't need to match the instruction.
4745    return true;
4746  }
4747
4748  // Create the leading tokens for the mnemonic, split by '.' characters.
4749  size_t Start = 0, Next = Name.find('.');
4750  StringRef Mnemonic = Name.slice(Start, Next);
4751
4752  // Split out the predication code and carry setting flag from the mnemonic.
4753  unsigned PredicationCode;
4754  unsigned ProcessorIMod;
4755  bool CarrySetting;
4756  StringRef ITMask;
4757  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4758                           ProcessorIMod, ITMask);
4759
4760  // In Thumb1, only the branch (B) instruction can be predicated.
4761  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4762    Parser.EatToEndOfStatement();
4763    return Error(NameLoc, "conditional execution not supported in Thumb1");
4764  }
4765
4766  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4767
4768  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4769  // is the mask as it will be for the IT encoding if the conditional
4770  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4771  // where the conditional bit0 is zero, the instruction post-processing
4772  // will adjust the mask accordingly.
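  // A few hand-traced examples of the conversion below (assuming bit0 of
  // the condition is '1', so 't' maps to '1'):
  //   "it" -> 0b1000,  "itt" -> 0b1100,  "ite" -> 0b0100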
4773  if (Mnemonic == "it") {
4774    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4775    if (ITMask.size() > 3) {
4776      Parser.EatToEndOfStatement();
4777      return Error(Loc, "too many conditions on IT instruction");
4778    }
4779    unsigned Mask = 8;
4780    for (unsigned i = ITMask.size(); i != 0; --i) {
4781      char pos = ITMask[i - 1];
4782      if (pos != 't' && pos != 'e') {
4783        Parser.EatToEndOfStatement();
4784        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4785      }
4786      Mask >>= 1;
4787      if (ITMask[i - 1] == 't')
4788        Mask |= 8;
4789    }
4790    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4791  }
4792
4793  // FIXME: This is all a pretty gross hack. We should automatically handle
4794  // optional operands like this via tblgen.
4795
4796  // Next, add the CCOut and ConditionCode operands, if needed.
4797  //
4798  // For mnemonics which can ever incorporate a carry setting bit or predication
4799  // code, our matching model involves us always generating CCOut and
4800  // ConditionCode operands to match the mnemonic "as written" and then we let
4801  // the matcher deal with finding the right instruction or generating an
4802  // appropriate error.
4803  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4804  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4805
4806  // If we had a carry-set on an instruction that can't do that, issue an
4807  // error.
4808  if (!CanAcceptCarrySet && CarrySetting) {
4809    Parser.EatToEndOfStatement();
4810    return Error(NameLoc, "instruction '" + Mnemonic +
4811                 "' can not set flags, but 's' suffix specified");
4812  }
4813  // If we had a predication code on an instruction that can't do that, issue an
4814  // error.
4815  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4816    Parser.EatToEndOfStatement();
4817    return Error(NameLoc, "instruction '" + Mnemonic +
4818                 "' is not predicable, but condition code specified");
4819  }
4820
4821  // Add the carry setting operand, if necessary.
4822  if (CanAcceptCarrySet) {
4823    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4824    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4825                                               Loc));
4826  }
4827
4828  // Add the predication code operand, if necessary.
4829  if (CanAcceptPredicationCode) {
4830    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4831                                      CarrySetting);
4832    Operands.push_back(ARMOperand::CreateCondCode(
4833                         ARMCC::CondCodes(PredicationCode), Loc));
4834  }
4835
4836  // Add the processor imod operand, if necessary.
4837  if (ProcessorIMod) {
4838    Operands.push_back(ARMOperand::CreateImm(
4839          MCConstantExpr::Create(ProcessorIMod, getContext()),
4840                                 NameLoc, NameLoc));
4841  }
4842
4843  // Add the remaining tokens in the mnemonic.
4844  while (Next != StringRef::npos) {
4845    Start = Next;
4846    Next = Name.find('.', Start + 1);
4847    StringRef ExtraToken = Name.slice(Start, Next);
4848
4849    // Some NEON instructions have an optional datatype suffix that is
4850    // completely ignored. Check for that.
4851    if (isDataTypeToken(ExtraToken) &&
4852        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4853      continue;
4854
4855    if (ExtraToken != ".n") {
4856      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4857      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4858    }
4859  }
4860
4861  // Read the remaining operands.
4862  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4863    // Read the first operand.
4864    if (parseOperand(Operands, Mnemonic)) {
4865      Parser.EatToEndOfStatement();
4866      return true;
4867    }
4868
4869    while (getLexer().is(AsmToken::Comma)) {
4870      Parser.Lex();  // Eat the comma.
4871
4872      // Parse and remember the operand.
4873      if (parseOperand(Operands, Mnemonic)) {
4874        Parser.EatToEndOfStatement();
4875        return true;
4876      }
4877    }
4878  }
4879
4880  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4881    SMLoc Loc = getLexer().getLoc();
4882    Parser.EatToEndOfStatement();
4883    return Error(Loc, "unexpected token in argument list");
4884  }
4885
4886  Parser.Lex(); // Consume the EndOfStatement
4887
4888  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4889  // do and don't have a cc_out optional-def operand. With some spot-checks
4890  // of the operand list, we can figure out which variant we're trying to
4891  // parse and adjust accordingly before actually matching. We shouldn't ever
4892  // try to remove a cc_out operand that was explicitly set on the
4893  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4894  // table-driven matcher doesn't fit well with the ARM instruction set.
4895  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4896    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4897    Operands.erase(Operands.begin() + 1);
4898    delete Op;
4899  }
4900
4901  // ARM mode 'blx' need special handling, as the register operand version
4902  // is predicable, but the label operand version is not. So, we can't rely
4903  // on the Mnemonic based checking to correctly figure out when to put
4904  // a k_CondCode operand in the list. If we're trying to match the label
4905  // version, remove the k_CondCode operand here.
4906  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4907      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4908    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4909    Operands.erase(Operands.begin() + 1);
4910    delete Op;
4911  }
4912
4913  // The vector-compare-to-zero instructions have a literal token "#0" at
4914  // the end that comes to here as an immediate operand. Convert it to a
4915  // token to play nicely with the matcher.
4916  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4917      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4918      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4919    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4920    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4921    if (CE && CE->getValue() == 0) {
4922      Operands.erase(Operands.begin() + 5);
4923      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4924      delete Op;
4925    }
4926  }
4927  // VCMP{E} does the same thing, but with a different operand count.
4928  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4929      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4930    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4931    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4932    if (CE && CE->getValue() == 0) {
4933      Operands.erase(Operands.begin() + 4);
4934      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4935      delete Op;
4936    }
4937  }
4938  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4939  // end. Convert it to a token here. Take care not to convert those
4940  // that should hit the Thumb2 encoding.
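  // (For illustration, the Thumb1 form "rsbs r1, r2, #0" ends up matching
  //  with "#0" as a literal token rather than as an immediate operand.)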
4941  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4942      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4943      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4944      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4945    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4946    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4947    if (CE && CE->getValue() == 0 &&
4948        (isThumbOne() ||
4949         // The cc_out operand matches the IT block.
4950         ((inITBlock() != CarrySetting) &&
4951         // Neither register operand is a high register.
4952         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4953          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4954      Operands.erase(Operands.begin() + 5);
4955      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4956      delete Op;
4957    }
4958  }
4959
4960  return false;
4961}
4962
4963// Validate context-sensitive operand constraints.
4964
4965// Return 'true' if the register list contains a register other than a
4966// low GPR (or HiReg, if one is specified), 'false' otherwise. Sets
4967// 'containsReg' to true if Reg appears in the list.
4968static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4969                                 unsigned HiReg, bool &containsReg) {
4970  containsReg = false;
4971  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4972    unsigned OpReg = Inst.getOperand(i).getReg();
4973    if (OpReg == Reg)
4974      containsReg = true;
4975    // Anything other than a low register isn't legal here.
4976    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4977      return true;
4978  }
4979  return false;
4980}
4981
4982// Check if the specified register is in the register list of the inst,
4983// starting at the indicated operand number.
4984static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4985  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4986    unsigned OpReg = Inst.getOperand(i).getReg();
4987    if (OpReg == Reg)
4988      return true;
4989  }
4990  return false;
4991}
4992
4993// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4994// the ARMInsts array) instead. Getting that here requires awkward
4995// API changes, though. Better way?
4996namespace llvm {
4997extern const MCInstrDesc ARMInsts[];
4998}
4999static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5000  return ARMInsts[Opcode];
5001}
5002
5003// FIXME: We would really like to be able to tablegen'erate this.
5004bool ARMAsmParser::
5005validateInstruction(MCInst &Inst,
5006                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5007  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5008  SMLoc Loc = Operands[0]->getStartLoc();
5009  // Check the IT block state first.
5010  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
5011  // being allowed in IT blocks, but not being predicable.  It just always
5012  // executes.
5013  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
5014    unsigned bit = 1;
5015    if (ITState.FirstCond)
5016      ITState.FirstCond = false;
5017    else
5018      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5019    // The instruction must be predicable.
5020    if (!MCID.isPredicable())
5021      return Error(Loc, "instructions in IT block must be predicable");
5022    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5023    unsigned ITCond = bit ? ITState.Cond :
5024      ARMCC::getOppositeCondition(ITState.Cond);
5025    if (Cond != ITCond) {
5026      // Find the condition code Operand to get its SMLoc information.
5027      SMLoc CondLoc;
5028      for (unsigned i = 1; i < Operands.size(); ++i)
5029        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5030          CondLoc = Operands[i]->getStartLoc();
5031      return Error(CondLoc, "incorrect condition in IT block; got '" +
5032                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5033                   "', but expected '" +
5034                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5035    }
5036  // Check for non-'al' condition codes outside of the IT block.
5037  } else if (isThumbTwo() && MCID.isPredicable() &&
5038             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5039             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5040             Inst.getOpcode() != ARM::t2B)
5041    return Error(Loc, "predicated instructions must be in IT block");
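  // (For example, something like "addeq r0, r1, r2" outside of any IT block
  //  would be rejected by the check above when assembling for Thumb2.)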
5042
5043  switch (Inst.getOpcode()) {
5044  case ARM::LDRD:
5045  case ARM::LDRD_PRE:
5046  case ARM::LDRD_POST:
5047  case ARM::LDREXD: {
5048    // Rt2 must be Rt + 1.
5049    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5050    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5051    if (Rt2 != Rt + 1)
5052      return Error(Operands[3]->getStartLoc(),
5053                   "destination operands must be sequential");
5054    return false;
5055  }
5056  case ARM::STRD: {
5057    // Rt2 must be Rt + 1.
5058    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5059    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5060    if (Rt2 != Rt + 1)
5061      return Error(Operands[3]->getStartLoc(),
5062                   "source operands must be sequential");
5063    return false;
5064  }
5065  case ARM::STRD_PRE:
5066  case ARM::STRD_POST:
5067  case ARM::STREXD: {
5068    // Rt2 must be Rt + 1.
5069    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5070    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5071    if (Rt2 != Rt + 1)
5072      return Error(Operands[3]->getStartLoc(),
5073                   "source operands must be sequential");
5074    return false;
5075  }
5076  case ARM::SBFX:
5077  case ARM::UBFX: {
5078    // width must be in range [1, 32-lsb]
5079    unsigned lsb = Inst.getOperand(2).getImm();
5080    unsigned widthm1 = Inst.getOperand(3).getImm();
5081    if (widthm1 >= 32 - lsb)
5082      return Error(Operands[5]->getStartLoc(),
5083                   "bitfield width must be in range [1,32-lsb]");
5084    return false;
5085  }
5086  case ARM::tLDMIA: {
5087    // If we're parsing Thumb2, the .w variant is available and handles
5088    // most cases that are normally illegal for a Thumb1 LDM
5089    // instruction. We'll make the transformation in processInstruction()
5090    // if necessary.
5091    //
5092    // Thumb LDM instructions are writeback iff the base register is not
5093    // in the register list.
5094    unsigned Rn = Inst.getOperand(0).getReg();
5095    bool hasWritebackToken =
5096      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5097       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5098    bool listContainsBase;
5099    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5100      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5101                   "registers must be in range r0-r7");
5102    // If we should have writeback, then there should be a '!' token.
5103    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5104      return Error(Operands[2]->getStartLoc(),
5105                   "writeback operator '!' expected");
5106    // If we should not have writeback, there must not be a '!'. This is
5107    // true even for the 32-bit wide encodings.
5108    if (listContainsBase && hasWritebackToken)
5109      return Error(Operands[3]->getStartLoc(),
5110                   "writeback operator '!' not allowed when base register "
5111                   "in register list");
5112
5113    break;
5114  }
5115  case ARM::t2LDMIA_UPD: {
5116    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5117      return Error(Operands[4]->getStartLoc(),
5118                   "writeback operator '!' not allowed when base register "
5119                   "in register list");
5120    break;
5121  }
5122  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5123  // so only issue a diagnostic for thumb1. The instructions will be
5124  // switched to the t2 encodings in processInstruction() if necessary.
5125  case ARM::tPOP: {
5126    bool listContainsBase;
5127    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5128        !isThumbTwo())
5129      return Error(Operands[2]->getStartLoc(),
5130                   "registers must be in range r0-r7 or pc");
5131    break;
5132  }
5133  case ARM::tPUSH: {
5134    bool listContainsBase;
5135    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5136        !isThumbTwo())
5137      return Error(Operands[2]->getStartLoc(),
5138                   "registers must be in range r0-r7 or lr");
5139    break;
5140  }
5141  case ARM::tSTMIA_UPD: {
5142    bool listContainsBase;
5143    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5144      return Error(Operands[4]->getStartLoc(),
5145                   "registers must be in range r0-r7");
5146    break;
5147  }
5148  }
5149
5150  return false;
5151}
5152
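// For the NEON lane load/store aliases handled below, 'Spacing' is the
// register stride between consecutive vectors in the list: 1 for the
// d-register forms and 2 for the q-register forms (which use every other
// d-register). It is consumed later when synthesizing the second register
// operand of a pair.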
5153static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
5154  switch(Opc) {
5155  default: assert(0 && "unexpected opcode!");
5156  // VST1LN
5157  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
5158  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5159  case ARM::VST1LNdWB_fixed_Asm_U8:
5160    Spacing = 1;
5161    return ARM::VST1LNd8_UPD;
5162  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
5163  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
5164  case ARM::VST1LNdWB_fixed_Asm_U16:
5165    Spacing = 1;
5166    return ARM::VST1LNd16_UPD;
5167  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
5168  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5169  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
5170    Spacing = 1;
5171    return ARM::VST1LNd32_UPD;
5172  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
5173  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5174  case ARM::VST1LNdWB_register_Asm_U8:
5175    Spacing = 1;
5176    return ARM::VST1LNd8_UPD;
5177  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
5178  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
5179  case ARM::VST1LNdWB_register_Asm_U16:
5180    Spacing = 1;
5181    return ARM::VST1LNd16_UPD;
5182  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
5183  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5184  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
5185    Spacing = 1;
5186    return ARM::VST1LNd32_UPD;
5187  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
5188  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
5189  case ARM::VST1LNdAsm_U8:
5190    Spacing = 1;
5191    return ARM::VST1LNd8;
5192  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
5193  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5194  case ARM::VST1LNdAsm_U16:
5195    Spacing = 1;
5196    return ARM::VST1LNd16;
5197  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
5198  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
5199  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
5200    Spacing = 1;
5201    return ARM::VST1LNd32;
5202
5203  // VST2LN
5204  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
5205  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5206  case ARM::VST2LNdWB_fixed_Asm_U8:
5207    Spacing = 1;
5208    return ARM::VST2LNd8_UPD;
5209  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
5210  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
5211  case ARM::VST2LNdWB_fixed_Asm_U16:
5212    Spacing = 1;
5213    return ARM::VST2LNd16_UPD;
5214  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5215  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5216  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5217    Spacing = 1;
5218    return ARM::VST2LNd32_UPD;
5219  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5220  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5221  case ARM::VST2LNqWB_fixed_Asm_U16:
5222    Spacing = 2;
5223    return ARM::VST2LNq16_UPD;
5224  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
5225  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
5226  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
5227    Spacing = 2;
5228    return ARM::VST2LNq32_UPD;
5229
5230  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
5231  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5232  case ARM::VST2LNdWB_register_Asm_U8:
5233    Spacing = 1;
5234    return ARM::VST2LNd8_UPD;
5235  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
5236  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
5237  case ARM::VST2LNdWB_register_Asm_U16:
5238    Spacing = 1;
5239    return ARM::VST2LNd16_UPD;
5240  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5241  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5242  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5243    Spacing = 1;
5244    return ARM::VST2LNd32_UPD;
5245  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5246  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5247  case ARM::VST2LNqWB_register_Asm_U16:
5248    Spacing = 2;
5249    return ARM::VST2LNq16_UPD;
5250  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
5251  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
5252  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
5253    Spacing = 2;
5254    return ARM::VST2LNq32_UPD;
5255
5256  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
5257  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
5258  case ARM::VST2LNdAsm_U8:
5259    Spacing = 1;
5260    return ARM::VST2LNd8;
5261  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
5262  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5263  case ARM::VST2LNdAsm_U16:
5264    Spacing = 1;
5265    return ARM::VST2LNd16;
5266  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5267  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
5268  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
5269    Spacing = 1;
5270    return ARM::VST2LNd32;
5271  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5272  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
5273  case ARM::VST2LNqAsm_U16:
5274    Spacing = 2;
5275    return ARM::VST2LNq16;
5276  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
5277  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
5278  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
5279    Spacing = 2;
5280    return ARM::VST2LNq32;
5281  }
5282}
5283
5284static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
5285  switch(Opc) {
5286  default: assert(0 && "unexpected opcode!");
5287  // VLD1LN
5288  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5289  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5290  case ARM::VLD1LNdWB_fixed_Asm_U8:
5291    Spacing = 1;
5292    return ARM::VLD1LNd8_UPD;
5293  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5294  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5295  case ARM::VLD1LNdWB_fixed_Asm_U16:
5296    Spacing = 1;
5297    return ARM::VLD1LNd16_UPD;
5298  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5299  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5300  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5301    Spacing = 1;
5302    return ARM::VLD1LNd32_UPD;
5303  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5304  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5305  case ARM::VLD1LNdWB_register_Asm_U8:
5306    Spacing = 1;
5307    return ARM::VLD1LNd8_UPD;
5308  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5309  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5310  case ARM::VLD1LNdWB_register_Asm_U16:
5311    Spacing = 1;
5312    return ARM::VLD1LNd16_UPD;
5313  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5314  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5315  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5316    Spacing = 1;
5317    return ARM::VLD1LNd32_UPD;
5318  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5319  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5320  case ARM::VLD1LNdAsm_U8:
5321    Spacing = 1;
5322    return ARM::VLD1LNd8;
5323  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5324  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5325  case ARM::VLD1LNdAsm_U16:
5326    Spacing = 1;
5327    return ARM::VLD1LNd16;
5328  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5329  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5330  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5331    Spacing = 1;
5332    return ARM::VLD1LNd32;
5333
5334  // VLD2LN
5335  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5336  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5337  case ARM::VLD2LNdWB_fixed_Asm_U8:
5338    Spacing = 1;
5339    return ARM::VLD2LNd8_UPD;
5340  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5341  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5342  case ARM::VLD2LNdWB_fixed_Asm_U16:
5343    Spacing = 1;
5344    return ARM::VLD2LNd16_UPD;
5345  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5346  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5347  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5348    Spacing = 1;
5349    return ARM::VLD2LNd32_UPD;
5350  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5351  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5352  case ARM::VLD2LNqWB_fixed_Asm_U16:
5353    Spacing = 2;
5354    return ARM::VLD2LNq16_UPD;
5355  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
5356  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
5357  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
5358    Spacing = 2;
5359    return ARM::VLD2LNq32_UPD;
5360  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5361  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5362  case ARM::VLD2LNdWB_register_Asm_U8:
5363    Spacing = 1;
5364    return ARM::VLD2LNd8_UPD;
5365  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5366  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5367  case ARM::VLD2LNdWB_register_Asm_U16:
5368    Spacing = 1;
5369    return ARM::VLD2LNd16_UPD;
5370  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5371  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5372  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5373    Spacing = 1;
5374    return ARM::VLD2LNd32_UPD;
5375  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5376  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5377  case ARM::VLD2LNqWB_register_Asm_U16:
5378    Spacing = 2;
5379    return ARM::VLD2LNq16_UPD;
5380  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
5381  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
5382  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
5383    Spacing = 2;
5384    return ARM::VLD2LNq32_UPD;
5385  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5386  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5387  case ARM::VLD2LNdAsm_U8:
5388    Spacing = 1;
5389    return ARM::VLD2LNd8;
5390  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5391  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5392  case ARM::VLD2LNdAsm_U16:
5393    Spacing = 1;
5394    return ARM::VLD2LNd16;
5395  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5396  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5397  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5398    Spacing = 1;
5399    return ARM::VLD2LNd32;
5400  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5401  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
5402  case ARM::VLD2LNqAsm_U16:
5403    Spacing = 2;
5404    return ARM::VLD2LNq16;
5405  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
5406  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
5407  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
5408    Spacing = 2;
5409    return ARM::VLD2LNq32;
5410  }
5411}
5412
5413bool ARMAsmParser::
5414processInstruction(MCInst &Inst,
5415                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5416  switch (Inst.getOpcode()) {
5417  // Handle NEON VST complex aliases.
5418  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5419  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5420  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5421  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5422  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5423  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5424  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5425  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5426    MCInst TmpInst;
5427    // Shuffle the operands around so the lane index operand is in the
5428    // right place.
5429    unsigned Spacing;
5430    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5431    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5432    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5433    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5434    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5435    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5436    TmpInst.addOperand(Inst.getOperand(1)); // lane
5437    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5438    TmpInst.addOperand(Inst.getOperand(6));
5439    Inst = TmpInst;
5440    return true;
5441  }
5442
5443  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
5444  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
5445  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
5446  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5447  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5448  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5449  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5450  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5451  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5452  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5453  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
5454  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
5455  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
5456  case ARM::VST2LNqWB_register_Asm_U32: {
5457    MCInst TmpInst;
5458    // Shuffle the operands around so the lane index operand is in the
5459    // right place.
5460    unsigned Spacing;
5461    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5462    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5463    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5464    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5465    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5466    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5467    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5468                                            Spacing));
5469    TmpInst.addOperand(Inst.getOperand(1)); // lane
5470    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5471    TmpInst.addOperand(Inst.getOperand(6));
5472    Inst = TmpInst;
5473    return true;
5474  }
5475  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5476  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5477  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5478  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5479  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5480  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5481  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5482  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5483    MCInst TmpInst;
5484    // Shuffle the operands around so the lane index operand is in the
5485    // right place.
5486    unsigned Spacing;
5487    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5488    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5489    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5490    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5491    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5492    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5493    TmpInst.addOperand(Inst.getOperand(1)); // lane
5494    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5495    TmpInst.addOperand(Inst.getOperand(5));
5496    Inst = TmpInst;
5497    return true;
5498  }
5499
5500  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
5501  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
5502  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
5503  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5504  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5505  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5506  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5507  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5508  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5509  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5510  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
5511  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
5512  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
5513  case ARM::VST2LNqWB_fixed_Asm_U32: {
5514    MCInst TmpInst;
5515    // Shuffle the operands around so the lane index operand is in the
5516    // right place.
5517    unsigned Spacing;
5518    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5519    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5520    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5521    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5522    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5523    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5524    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5525                                            Spacing));
5526    TmpInst.addOperand(Inst.getOperand(1)); // lane
5527    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5528    TmpInst.addOperand(Inst.getOperand(5));
5529    Inst = TmpInst;
5530    return true;
5531  }
5532  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5533  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5534  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5535  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5536  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5537  case ARM::VST1LNdAsm_U32: {
5538    MCInst TmpInst;
5539    // Shuffle the operands around so the lane index operand is in the
5540    // right place.
5541    unsigned Spacing;
5542    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5543    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5544    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5545    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5546    TmpInst.addOperand(Inst.getOperand(1)); // lane
5547    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5548    TmpInst.addOperand(Inst.getOperand(5));
5549    Inst = TmpInst;
5550    return true;
5551  }
5552
5553  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
5554  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
5555  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5556  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5557  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5558  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5559  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
5560  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
5561  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
5562    MCInst TmpInst;
5563    // Shuffle the operands around so the lane index operand is in the
5564    // right place.
5565    unsigned Spacing;
5566    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5567    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5568    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5569    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5570    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5571                                            Spacing));
5572    TmpInst.addOperand(Inst.getOperand(1)); // lane
5573    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5574    TmpInst.addOperand(Inst.getOperand(5));
5575    Inst = TmpInst;
5576    return true;
5577  }
5578  // Handle NEON VLD complex aliases.
5579  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5580  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5581  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5582  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5583  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5584  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5585  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5586  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5587    MCInst TmpInst;
5588    // Shuffle the operands around so the lane index operand is in the
5589    // right place.
5590    unsigned Spacing;
5591    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5592    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5593    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5594    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5595    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5596    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5597    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5598    TmpInst.addOperand(Inst.getOperand(1)); // lane
5599    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5600    TmpInst.addOperand(Inst.getOperand(6));
5601    Inst = TmpInst;
5602    return true;
5603  }
5604
5605  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
5606  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
5607  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
5608  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5609  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5610  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5611  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5612  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5613  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5614  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5615  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
5616  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
5617  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
5618  case ARM::VLD2LNqWB_register_Asm_U32: {
5619    MCInst TmpInst;
5620    // Shuffle the operands around so the lane index operand is in the
5621    // right place.
5622    unsigned Spacing;
5623    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5624    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5625    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5626                                            Spacing));
5627    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5628    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5629    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5630    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5631    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5632    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5633                                            Spacing));
5634    TmpInst.addOperand(Inst.getOperand(1)); // lane
5635    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5636    TmpInst.addOperand(Inst.getOperand(6));
5637    Inst = TmpInst;
5638    return true;
5639  }
5640
5641  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5642  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5643  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5644  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5645  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5646  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5647  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5648  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5649    MCInst TmpInst;
5650    // Shuffle the operands around so the lane index operand is in the
5651    // right place.
5652    unsigned Spacing;
5653    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5654    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5655    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5656    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5657    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5658    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5659    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5660    TmpInst.addOperand(Inst.getOperand(1)); // lane
5661    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5662    TmpInst.addOperand(Inst.getOperand(5));
5663    Inst = TmpInst;
5664    return true;
5665  }
5666
5667  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
5668  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
5669  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
5670  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5671  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5672  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5673  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5674  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5675  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5676  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5677  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
5678  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
5679  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
5680  case ARM::VLD2LNqWB_fixed_Asm_U32: {
5681    MCInst TmpInst;
5682    // Shuffle the operands around so the lane index operand is in the
5683    // right place.
5684    unsigned Spacing;
5685    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5686    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5687    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5688                                            Spacing));
5689    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5690    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5691    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5692    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5693    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5694    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5695                                            Spacing));
5696    TmpInst.addOperand(Inst.getOperand(1)); // lane
5697    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5698    TmpInst.addOperand(Inst.getOperand(5));
5699    Inst = TmpInst;
5700    return true;
5701  }
5702
5703  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
5704  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
5705  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5706  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5707  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5708  case ARM::VLD1LNdAsm_U32: {
5709    MCInst TmpInst;
5710    // Shuffle the operands around so the lane index operand is in the
5711    // right place.
5712    unsigned Spacing;
5713    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5714    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5715    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5716    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5717    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5718    TmpInst.addOperand(Inst.getOperand(1)); // lane
5719    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5720    TmpInst.addOperand(Inst.getOperand(5));
5721    Inst = TmpInst;
5722    return true;
5723  }
5724
5725  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
5726  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
5727  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5728  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5729  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5730  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5731  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
5732  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
5733  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
5734  case ARM::VLD2LNqAsm_U32: {
5735    MCInst TmpInst;
5736    // Shuffle the operands around so the lane index operand is in the
5737    // right place.
5738    unsigned Spacing;
5739    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5740    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5741    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5742                                            Spacing));
5743    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5744    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5745    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5746    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5747                                            Spacing));
5748    TmpInst.addOperand(Inst.getOperand(1)); // lane
5749    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5750    TmpInst.addOperand(Inst.getOperand(5));
5751    Inst = TmpInst;
5752    return true;
5753  }
5754  // Handle the Thumb2 mode MOV complex aliases.
5755  case ARM::t2MOVsr:
5756  case ARM::t2MOVSsr: {
5757    // Which instruction to expand to depends on the CCOut operand and,
5758    // when the register operands are low registers, on whether we're in
5759    // an IT block.
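    // E.g. (illustrative): with low registers and Rd == Rn, a flag-setting
    // 'movs r1, r1, lsl r2' outside an IT block narrows to the 16-bit tLSLrr;
    // otherwise the 32-bit t2LSLrr is used.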
5760    bool isNarrow = false;
5761    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5762        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5763        isARMLowRegister(Inst.getOperand(2).getReg()) &&
5764        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
5765        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
5766      isNarrow = true;
5767    MCInst TmpInst;
5768    unsigned newOpc;
5769    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
5770    default: llvm_unreachable("unexpected opcode!");
5771    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
5772    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
5773    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
5774    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
5775    }
5776    TmpInst.setOpcode(newOpc);
5777    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5778    if (isNarrow)
5779      TmpInst.addOperand(MCOperand::CreateReg(
5780          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5781    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5782    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5783    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5784    TmpInst.addOperand(Inst.getOperand(5));
5785    if (!isNarrow)
5786      TmpInst.addOperand(MCOperand::CreateReg(
5787          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5788    Inst = TmpInst;
5789    return true;
5790  }
5791  case ARM::t2MOVsi:
5792  case ARM::t2MOVSsi: {
5793    // Which instruction to expand to depends on the CCOut operand and,
5794    // when the register operands are low registers, on whether we're in
5795    // an IT block.
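    // E.g. (illustrative): with low registers outside an IT block,
    // 'movs r0, r1, lsl #2' narrows to the 16-bit tLSLri; an rrx shift always
    // selects the 32-bit t2RRX.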
5796    bool isNarrow = false;
5797    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5798        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5799        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5800      isNarrow = true;
5801    MCInst TmpInst;
5802    unsigned newOpc;
5803    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5804    default: llvm_unreachable("unexpected opcode!");
5805    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5806    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5807    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5808    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5809    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
5810    }
5811    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5812    if (Amount == 32) Amount = 0;
5813    TmpInst.setOpcode(newOpc);
5814    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5815    if (isNarrow)
5816      TmpInst.addOperand(MCOperand::CreateReg(
5817          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5818    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5819    if (newOpc != ARM::t2RRX)
5820      TmpInst.addOperand(MCOperand::CreateImm(Amount));
5821    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5822    TmpInst.addOperand(Inst.getOperand(4));
5823    if (!isNarrow)
5824      TmpInst.addOperand(MCOperand::CreateReg(
5825          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5826    Inst = TmpInst;
5827    return true;
5828  }
5829  // Handle the ARM mode MOV complex aliases.
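  // E.g. (illustrative): 'asr r0, r1, r2' is equivalent to
  // 'mov r0, r1, asr r2' and is encoded as MOVsr; likewise
  // 'lsl r0, r1, #3' becomes MOVsi.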
5830  case ARM::ASRr:
5831  case ARM::LSRr:
5832  case ARM::LSLr:
5833  case ARM::RORr: {
5834    ARM_AM::ShiftOpc ShiftTy;
5835    switch(Inst.getOpcode()) {
5836    default: llvm_unreachable("unexpected opcode!");
5837    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5838    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5839    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5840    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5841    }
5842    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5843    MCInst TmpInst;
5844    TmpInst.setOpcode(ARM::MOVsr);
5845    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5846    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5847    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5848    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5849    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5850    TmpInst.addOperand(Inst.getOperand(4));
5851    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5852    Inst = TmpInst;
5853    return true;
5854  }
5855  case ARM::ASRi:
5856  case ARM::LSRi:
5857  case ARM::LSLi:
5858  case ARM::RORi: {
5859    ARM_AM::ShiftOpc ShiftTy;
5860    switch(Inst.getOpcode()) {
5861    default: llvm_unreachable("unexpected opcode!");
5862    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5863    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5864    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5865    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5866    }
5867    // A shift by zero is a plain MOVr, not a MOVsi.
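    // E.g. (illustrative): 'lsl r0, r1, #0' is encoded as 'mov r0, r1'.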
5868    unsigned Amt = Inst.getOperand(2).getImm();
5869    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5870    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5871    MCInst TmpInst;
5872    TmpInst.setOpcode(Opc);
5873    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5874    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5875    if (Opc == ARM::MOVsi)
5876      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5877    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5878    TmpInst.addOperand(Inst.getOperand(4));
5879    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5880    Inst = TmpInst;
5881    return true;
5882  }
5883  case ARM::RRXi: {
5884    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5885    MCInst TmpInst;
5886    TmpInst.setOpcode(ARM::MOVsi);
5887    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5888    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5889    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5890    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5891    TmpInst.addOperand(Inst.getOperand(3));
5892    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5893    Inst = TmpInst;
5894    return true;
5895  }
5896  case ARM::t2LDMIA_UPD: {
5897    // If this is a load of a single register, then we should use
5898    // a post-indexed LDR instruction instead, per the ARM ARM.
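    // E.g. (illustrative): 'ldmia r3!, {r8}' is encoded as the post-indexed
    // 'ldr r8, [r3], #4' (t2LDR_POST).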
5899    if (Inst.getNumOperands() != 5)
5900      return false;
5901    MCInst TmpInst;
5902    TmpInst.setOpcode(ARM::t2LDR_POST);
5903    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5904    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5905    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5906    TmpInst.addOperand(MCOperand::CreateImm(4));
5907    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5908    TmpInst.addOperand(Inst.getOperand(3));
5909    Inst = TmpInst;
5910    return true;
5911  }
5912  case ARM::t2STMDB_UPD: {
5913    // If this is a store of a single register, then we should use
5914    // a pre-indexed STR instruction instead, per the ARM ARM.
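    // E.g. (illustrative): 'stmdb r3!, {r8}' is encoded as the pre-indexed
    // 'str r8, [r3, #-4]!' (t2STR_PRE).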
5915    if (Inst.getNumOperands() != 5)
5916      return false;
5917    MCInst TmpInst;
5918    TmpInst.setOpcode(ARM::t2STR_PRE);
5919    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5920    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5921    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5922    TmpInst.addOperand(MCOperand::CreateImm(-4));
5923    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5924    TmpInst.addOperand(Inst.getOperand(3));
5925    Inst = TmpInst;
5926    return true;
5927  }
5928  case ARM::LDMIA_UPD:
5929    // If this is a load of a single register via a 'pop', then we should use
5930    // a post-indexed LDR instruction instead, per the ARM ARM.
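    // E.g. (illustrative): 'pop {r3}' is encoded as 'ldr r3, [sp], #4'
    // (LDR_POST_IMM).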
5931    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5932        Inst.getNumOperands() == 5) {
5933      MCInst TmpInst;
5934      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5935      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5936      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5937      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5938      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5939      TmpInst.addOperand(MCOperand::CreateImm(4));
5940      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5941      TmpInst.addOperand(Inst.getOperand(3));
5942      Inst = TmpInst;
5943      return true;
5944    }
5945    break;
5946  case ARM::STMDB_UPD:
5947    // If this is a store of a single register via a 'push', then we should use
5948    // a pre-indexed STR instruction instead, per the ARM ARM.
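    // E.g. (illustrative): 'push {r3}' is encoded as 'str r3, [sp, #-4]!'
    // (STR_PRE_IMM).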
5949    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5950        Inst.getNumOperands() == 5) {
5951      MCInst TmpInst;
5952      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5953      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5954      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5955      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5956      TmpInst.addOperand(MCOperand::CreateImm(-4));
5957      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5958      TmpInst.addOperand(Inst.getOperand(3));
5959      Inst = TmpInst;
5960    }
5961    break;
5962  case ARM::t2ADDri12:
5963    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5964    // mnemonic was used (not "addw"), encoding T3 is preferred.
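    // E.g. (illustrative): if 'add r0, r1, #imm' matched this T4 form but #imm
    // is also a valid Thumb2 modified immediate, switch to t2ADDri; an
    // explicit 'addw' keeps the 12-bit T4 encoding.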
5965    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5966        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5967      break;
5968    Inst.setOpcode(ARM::t2ADDri);
5969    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5970    break;
5971  case ARM::t2SUBri12:
5972    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5973    // mnemonic was used (not "subw"), encoding T3 is preferred.
5974    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5975        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5976      break;
5977    Inst.setOpcode(ARM::t2SUBri);
5978    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5979    break;
5980  case ARM::tADDi8:
5981    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5982    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5983    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5984    // to encoding T1 if <Rd> is omitted."
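    // E.g. (illustrative): 'adds r2, r2, #5' (Rd written, immediate below 8)
    // is narrowed to tADDi3, while 'adds r2, #5' stays tADDi8.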
5985    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5986      Inst.setOpcode(ARM::tADDi3);
5987      return true;
5988    }
5989    break;
5990  case ARM::tSUBi8:
5991    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5992    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5993    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5994    // to encoding T1 if <Rd> is omitted."
5995    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5996      Inst.setOpcode(ARM::tSUBi3);
5997      return true;
5998    }
5999    break;
6000  case ARM::t2ADDrr: {
6001    // If the destination and first source operand are the same, and
6002    // there's no setting of the flags, use encoding T2 instead of T3.
6003    // Note that this is only for ADD, not SUB. This mirrors the system
6004    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
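    // E.g. (illustrative): 'add r3, r3, r12' narrows to the 16-bit tADDhirr,
    // while 'add.w r3, r3, r12' or a flag-setting 'adds' stays 32-bit.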
6005    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6006        Inst.getOperand(5).getReg() != 0 ||
6007        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6008         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6009      break;
6010    MCInst TmpInst;
6011    TmpInst.setOpcode(ARM::tADDhirr);
6012    TmpInst.addOperand(Inst.getOperand(0));
6013    TmpInst.addOperand(Inst.getOperand(0));
6014    TmpInst.addOperand(Inst.getOperand(2));
6015    TmpInst.addOperand(Inst.getOperand(3));
6016    TmpInst.addOperand(Inst.getOperand(4));
6017    Inst = TmpInst;
6018    return true;
6019  }
6020  case ARM::tB:
6021    // A Thumb conditional branch outside of an IT block is a tBcc.
6022    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6023      Inst.setOpcode(ARM::tBcc);
6024      return true;
6025    }
6026    break;
6027  case ARM::t2B:
6028    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6029    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6030      Inst.setOpcode(ARM::t2Bcc);
6031      return true;
6032    }
6033    break;
6034  case ARM::t2Bcc:
6035    // If the conditional is AL or we're in an IT block, we really want t2B.
6036    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6037      Inst.setOpcode(ARM::t2B);
6038      return true;
6039    }
6040    break;
6041  case ARM::tBcc:
6042    // If the conditional is AL, we really want tB.
6043    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6044      Inst.setOpcode(ARM::tB);
6045      return true;
6046    }
6047    break;
6048  case ARM::tLDMIA: {
6049    // If the register list contains any high registers, or if the writeback
6050    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6051    // instead if we're in Thumb2. Otherwise, this should have generated
6052    // an error in validateInstruction().
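    // E.g. (illustrative): 'ldmia r2, {r1, r3}' (no writeback and the base is
    // not in the list) or any list with high registers must use the 32-bit
    // t2LDMIA/t2LDMIA_UPD forms.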
6053    unsigned Rn = Inst.getOperand(0).getReg();
6054    bool hasWritebackToken =
6055      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6056       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6057    bool listContainsBase;
6058    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6059        (!listContainsBase && !hasWritebackToken) ||
6060        (listContainsBase && hasWritebackToken)) {
6061      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6062      assert (isThumbTwo());
6063      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6064      // If we're switching to the updating version, we need to insert
6065      // the writeback tied operand.
6066      if (hasWritebackToken)
6067        Inst.insert(Inst.begin(),
6068                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6069      return true;
6070    }
6071    break;
6072  }
6073  case ARM::tSTMIA_UPD: {
6074    // If the register list contains any high registers, we need to use
6075    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6076    // should have generated an error in validateInstruction().
6077    unsigned Rn = Inst.getOperand(0).getReg();
6078    bool listContainsBase;
6079    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6080      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6081      assert (isThumbTwo());
6082      Inst.setOpcode(ARM::t2STMIA_UPD);
6083      return true;
6084    }
6085    break;
6086  }
6087  case ARM::tPOP: {
6088    bool listContainsBase;
6089    // If the register list contains any high registers, we need to use
6090    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6091    // should have generated an error in validateInstruction().
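    // E.g. (illustrative): 'pop {r4, r8, pc}' (r8 is a high register) becomes
    // t2LDMIA_UPD sp!, {r4, r8, pc}.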
6092    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6093      return false;
6094    assert (isThumbTwo());
6095    Inst.setOpcode(ARM::t2LDMIA_UPD);
6096    // Add the base register and writeback operands.
6097    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6098    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6099    return true;
6100  }
6101  case ARM::tPUSH: {
6102    bool listContainsBase;
6103    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6104      return false;
6105    assert (isThumbTwo());
6106    Inst.setOpcode(ARM::t2STMDB_UPD);
6107    // Add the base register and writeback operands.
6108    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6109    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6110    return true;
6111  }
6112  case ARM::t2MOVi: {
6113    // If we can use the 16-bit encoding and the user didn't explicitly
6114    // request the 32-bit variant, transform it here.
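    // E.g. (illustrative): a flag-setting 'movs r1, #20' outside an IT block,
    // or a predicated 'mov r1, #20' inside one, narrows to the 16-bit tMOVi8;
    // 'mov.w' keeps the 32-bit encoding.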
6115    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6116        Inst.getOperand(1).getImm() <= 255 &&
6117        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6118         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6119        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6120        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6121         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6122      // The operands aren't in the same order for tMOVi8...
6123      MCInst TmpInst;
6124      TmpInst.setOpcode(ARM::tMOVi8);
6125      TmpInst.addOperand(Inst.getOperand(0));
6126      TmpInst.addOperand(Inst.getOperand(4));
6127      TmpInst.addOperand(Inst.getOperand(1));
6128      TmpInst.addOperand(Inst.getOperand(2));
6129      TmpInst.addOperand(Inst.getOperand(3));
6130      Inst = TmpInst;
6131      return true;
6132    }
6133    break;
6134  }
6135  case ARM::t2MOVr: {
6136    // If we can use the 16-bit encoding and the user didn't explicitly
6137    // request the 32-bit variant, transform it here.
6138    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6139        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6140        Inst.getOperand(2).getImm() == ARMCC::AL &&
6141        Inst.getOperand(4).getReg() == ARM::CPSR &&
6142        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6143         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6144      // The operands aren't the same for tMOV[S]r... (no cc_out)
6145      MCInst TmpInst;
6146      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6147      TmpInst.addOperand(Inst.getOperand(0));
6148      TmpInst.addOperand(Inst.getOperand(1));
6149      TmpInst.addOperand(Inst.getOperand(2));
6150      TmpInst.addOperand(Inst.getOperand(3));
6151      Inst = TmpInst;
6152      return true;
6153    }
6154    break;
6155  }
6156  case ARM::t2SXTH:
6157  case ARM::t2SXTB:
6158  case ARM::t2UXTH:
6159  case ARM::t2UXTB: {
6160    // If we can use the 16-bit encoding and the user didn't explicitly
6161    // request the 32-bit variant, transform it here.
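    // E.g. (illustrative): 'uxtb r0, r1' (no rotation) narrows to the 16-bit
    // tUXTB, while 'uxtb.w r0, r1' keeps the 32-bit encoding.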
6162    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6163        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6164        Inst.getOperand(2).getImm() == 0 &&
6165        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6166         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6167      unsigned NewOpc;
6168      switch (Inst.getOpcode()) {
6169      default: llvm_unreachable("Illegal opcode!");
6170      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6171      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6172      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6173      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6174      }
6175      // The operands aren't the same for Thumb1 (no rotate operand).
6176      MCInst TmpInst;
6177      TmpInst.setOpcode(NewOpc);
6178      TmpInst.addOperand(Inst.getOperand(0));
6179      TmpInst.addOperand(Inst.getOperand(1));
6180      TmpInst.addOperand(Inst.getOperand(3));
6181      TmpInst.addOperand(Inst.getOperand(4));
6182      Inst = TmpInst;
6183      return true;
6184    }
6185    break;
6186  }
6187  case ARM::MOVsi: {
6188    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6189    if (SOpc == ARM_AM::rrx) return false;
6190    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6191      // Shifting by zero is accepted as a vanilla 'MOVr'
6192      MCInst TmpInst;
6193      TmpInst.setOpcode(ARM::MOVr);
6194      TmpInst.addOperand(Inst.getOperand(0));
6195      TmpInst.addOperand(Inst.getOperand(1));
6196      TmpInst.addOperand(Inst.getOperand(3));
6197      TmpInst.addOperand(Inst.getOperand(4));
6198      TmpInst.addOperand(Inst.getOperand(5));
6199      Inst = TmpInst;
6200      return true;
6201    }
6202    return false;
6203  }
6204  case ARM::t2IT: {
6205    // In the encoding, the mask bits for all but the first condition are
6206    // interpreted relative to the low bit of the condition code: a mask bit
6207    // equal to that low bit means 't'. We always parse '1' as implying 't',
6208    // so XOR-toggle the bits if the low bit of the condition code is zero.
6209    // The encoding also expects the low bit of the condition to be encoded
6210    // as bit 4 of the mask operand, so OR that in if needed.
6211    MCOperand &MO = Inst.getOperand(1);
6212    unsigned Mask = MO.getImm();
6213    unsigned OrigMask = Mask;
6214    unsigned TZ = CountTrailingZeros_32(Mask);
6215    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6216      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6217      for (unsigned i = 3; i != TZ; --i)
6218        Mask ^= 1 << i;
6219    } else
6220      Mask |= 0x10;
6221    MO.setImm(Mask);
6222
6223    // Set up the IT block state according to the IT instruction we just
6224    // matched.
6225    assert(!inITBlock() && "nested IT blocks?!");
6226    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6227    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6228    ITState.CurPosition = 0;
6229    ITState.FirstCond = true;
6230    break;
6231  }
6232  }
6233  return false;
6234}
6235
6236unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6237  // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
6238  // suffix, depending on whether they're in an IT block.
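  // E.g. (illustrative): in Thumb2 a 16-bit 'adds r0, r1, r2' is only valid
  // outside an IT block, while the non-flag-setting 16-bit form requires one.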
6239  unsigned Opc = Inst.getOpcode();
6240  const MCInstrDesc &MCID = getInstDesc(Opc);
6241  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6242    assert(MCID.hasOptionalDef() &&
6243           "optionally flag setting instruction missing optional def operand");
6244    assert(MCID.NumOperands == Inst.getNumOperands() &&
6245           "operand count mismatch!");
6246    // Find the optional-def operand (cc_out).
6247    unsigned OpNo;
6248    for (OpNo = 0;
6249         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
6250         ++OpNo)
6251      ;
6252    // If we're parsing Thumb1, reject it completely.
6253    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6254      return Match_MnemonicFail;
6255    // If we're parsing Thumb2, which form is legal depends on whether we're
6256    // in an IT block.
6257    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6258        !inITBlock())
6259      return Match_RequiresITBlock;
6260    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6261        inITBlock())
6262      return Match_RequiresNotITBlock;
6263  }
6264  // Some Thumb1 encodings that support high registers only allow both
6265  // registers to be from r0-r7 when Thumb2 is available.
6266  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6267           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6268           isARMLowRegister(Inst.getOperand(2).getReg()))
6269    return Match_RequiresThumb2;
6270  // Others only require ARMv6 or later.
6271  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6272           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6273           isARMLowRegister(Inst.getOperand(1).getReg()))
6274    return Match_RequiresV6;
6275  return Match_Success;
6276}
6277
6278bool ARMAsmParser::
6279MatchAndEmitInstruction(SMLoc IDLoc,
6280                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
6281                        MCStreamer &Out) {
6282  MCInst Inst;
6283  unsigned ErrorInfo;
6284  unsigned MatchResult;
6285  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
6286  switch (MatchResult) {
6287  default: break;
6288  case Match_Success:
6289    // Context sensitive operand constraints aren't handled by the matcher,
6290    // so check them here.
6291    if (validateInstruction(Inst, Operands)) {
6292      // Still progress the IT block, otherwise one wrong condition causes
6293      // nasty cascading errors.
6294      forwardITPosition();
6295      return true;
6296    }
6297
6298    // Some instructions need post-processing to, for example, tweak which
6299    // encoding is selected. Loop on it while changes happen so the
6300    // individual transformations can chain off each other. E.g.,
6301    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
6302    while (processInstruction(Inst, Operands))
6303      ;
6304
6305    // Only move forward at the very end so that everything in validate
6306    // and process gets a consistent answer about whether we're in an IT
6307    // block.
6308    forwardITPosition();
6309
6310    Out.EmitInstruction(Inst);
6311    return false;
6312  case Match_MissingFeature:
6313    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
6314    return true;
6315  case Match_InvalidOperand: {
6316    SMLoc ErrorLoc = IDLoc;
6317    if (ErrorInfo != ~0U) {
6318      if (ErrorInfo >= Operands.size())
6319        return Error(IDLoc, "too few operands for instruction");
6320
6321      ErrorLoc = static_cast<ARMOperand*>(Operands[ErrorInfo])->getStartLoc();
6322      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6323    }
6324
6325    return Error(ErrorLoc, "invalid operand for instruction");
6326  }
6327  case Match_MnemonicFail:
6328    return Error(IDLoc, "invalid instruction");
6329  case Match_ConversionFail:
6330    // The converter function will have already emitted a diagnostic.
6331    return true;
6332  case Match_RequiresNotITBlock:
6333    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6334  case Match_RequiresITBlock:
6335    return Error(IDLoc, "instruction only valid inside IT block");
6336  case Match_RequiresV6:
6337    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6338  case Match_RequiresThumb2:
6339    return Error(IDLoc, "instruction variant requires Thumb2");
6340  }
6341
6342  llvm_unreachable("Implement any new match types added!");
6343  return true;
6344}
6345
6346/// parseDirective parses the ARM-specific directives.
6347bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6348  StringRef IDVal = DirectiveID.getIdentifier();
6349  if (IDVal == ".word")
6350    return parseDirectiveWord(4, DirectiveID.getLoc());
6351  else if (IDVal == ".thumb")
6352    return parseDirectiveThumb(DirectiveID.getLoc());
6353  else if (IDVal == ".arm")
6354    return parseDirectiveARM(DirectiveID.getLoc());
6355  else if (IDVal == ".thumb_func")
6356    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6357  else if (IDVal == ".code")
6358    return parseDirectiveCode(DirectiveID.getLoc());
6359  else if (IDVal == ".syntax")
6360    return parseDirectiveSyntax(DirectiveID.getLoc());
6361  else if (IDVal == ".unreq")
6362    return parseDirectiveUnreq(DirectiveID.getLoc());
6363  else if (IDVal == ".arch")
6364    return parseDirectiveArch(DirectiveID.getLoc());
6365  else if (IDVal == ".eabi_attribute")
6366    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6367  return true;
6368}
6369
6370/// parseDirectiveWord
6371///  ::= .word [ expression (, expression)* ]
6372bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6373  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6374    for (;;) {
6375      const MCExpr *Value;
6376      if (getParser().ParseExpression(Value))
6377        return true;
6378
6379      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6380
6381      if (getLexer().is(AsmToken::EndOfStatement))
6382        break;
6383
6384      // FIXME: Improve diagnostic.
6385      if (getLexer().isNot(AsmToken::Comma))
6386        return Error(L, "unexpected token in directive");
6387      Parser.Lex();
6388    }
6389  }
6390
6391  Parser.Lex();
6392  return false;
6393}
6394
6395/// parseDirectiveThumb
6396///  ::= .thumb
6397bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6398  if (getLexer().isNot(AsmToken::EndOfStatement))
6399    return Error(L, "unexpected token in directive");
6400  Parser.Lex();
6401
6402  if (!isThumb())
6403    SwitchMode();
6404  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6405  return false;
6406}
6407
6408/// parseDirectiveARM
6409///  ::= .arm
6410bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6411  if (getLexer().isNot(AsmToken::EndOfStatement))
6412    return Error(L, "unexpected token in directive");
6413  Parser.Lex();
6414
6415  if (isThumb())
6416    SwitchMode();
6417  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6418  return false;
6419}
6420
6421/// parseDirectiveThumbFunc
6422///  ::= .thumb_func symbol_name
6423bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6424  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6425  bool isMachO = MAI.hasSubsectionsViaSymbols();
6426  StringRef Name;
6427  bool needFuncName = true;
6428
6429  // Darwin asm optionally has a function name after the .thumb_func
6430  // directive; ELF doesn't.
6431  if (isMachO) {
6432    const AsmToken &Tok = Parser.getTok();
6433    if (Tok.isNot(AsmToken::EndOfStatement)) {
6434      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6435        return Error(L, "unexpected token in .thumb_func directive");
6436      Name = Tok.getIdentifier();
6437      Parser.Lex(); // Consume the identifier token.
6438      needFuncName = false;
6439    }
6440  }
6441
6442  if (getLexer().isNot(AsmToken::EndOfStatement))
6443    return Error(L, "unexpected token in directive");
6444
6445  // Eat the end of statement and any blank lines that follow.
6446  while (getLexer().is(AsmToken::EndOfStatement))
6447    Parser.Lex();
6448
6449  // FIXME: assuming the function name is on the line following .thumb_func.
6450  // We really should be checking the next symbol definition even if there's
6451  // stuff in between.
6452  if (needFuncName) {
6453    Name = Parser.getTok().getIdentifier();
6454  }
6455
6456  // Mark symbol as a thumb symbol.
6457  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6458  getParser().getStreamer().EmitThumbFunc(Func);
6459  return false;
6460}
6461
6462/// parseDirectiveSyntax
6463///  ::= .syntax unified | divided
6464bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6465  const AsmToken &Tok = Parser.getTok();
6466  if (Tok.isNot(AsmToken::Identifier))
6467    return Error(L, "unexpected token in .syntax directive");
6468  StringRef Mode = Tok.getString();
6469  if (Mode == "unified" || Mode == "UNIFIED")
6470    Parser.Lex();
6471  else if (Mode == "divided" || Mode == "DIVIDED")
6472    return Error(L, "'.syntax divided' arm asssembly not supported");
6473  else
6474    return Error(L, "unrecognized syntax mode in .syntax directive");
6475
6476  if (getLexer().isNot(AsmToken::EndOfStatement))
6477    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6478  Parser.Lex();
6479
6480  // TODO tell the MC streamer the mode
6481  // getParser().getStreamer().Emit???();
6482  return false;
6483}
6484
6485/// parseDirectiveCode
6486///  ::= .code 16 | 32
6487bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6488  const AsmToken &Tok = Parser.getTok();
6489  if (Tok.isNot(AsmToken::Integer))
6490    return Error(L, "unexpected token in .code directive");
6491  int64_t Val = Parser.getTok().getIntVal();
6492  if (Val == 16)
6493    Parser.Lex();
6494  else if (Val == 32)
6495    Parser.Lex();
6496  else
6497    return Error(L, "invalid operand to .code directive");
6498
6499  if (getLexer().isNot(AsmToken::EndOfStatement))
6500    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6501  Parser.Lex();
6502
6503  if (Val == 16) {
6504    if (!isThumb())
6505      SwitchMode();
6506    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6507  } else {
6508    if (isThumb())
6509      SwitchMode();
6510    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6511  }
6512
6513  return false;
6514}
6515
6516/// parseDirectiveReq
6517///  ::= name .req registername
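///  e.g. (illustrative) 'foo .req r11' lets 'foo' be written wherever 'r11'
///  is expected; the alias can later be removed with '.unreq foo'.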
6518bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6519  Parser.Lex(); // Eat the '.req' token.
6520  unsigned Reg;
6521  SMLoc SRegLoc, ERegLoc;
6522  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6523    Parser.EatToEndOfStatement();
6524    return Error(SRegLoc, "register name expected");
6525  }
6526
6527  // Shouldn't be anything else.
6528  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6529    Parser.EatToEndOfStatement();
6530    return Error(Parser.getTok().getLoc(),
6531                 "unexpected input in .req directive.");
6532  }
6533
6534  Parser.Lex(); // Consume the EndOfStatement
6535
6536  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6537    return Error(SRegLoc, "redefinition of '" + Name +
6538                          "' does not match original.");
6539
6540  return false;
6541}
6542
6543/// parseDirectiveUnreq
6544///  ::= .unreq registername
6545bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6546  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6547    Parser.EatToEndOfStatement();
6548    return Error(L, "unexpected input in .unreq directive.");
6549  }
6550  RegisterReqs.erase(Parser.getTok().getIdentifier());
6551  Parser.Lex(); // Eat the identifier.
6552  return false;
6553}
6554
6555/// parseDirectiveArch
6556///  ::= .arch token
6557bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
6558  return true;
6559}
6560
6561/// parseDirectiveEabiAttr
6562///  ::= .eabi_attribute int, int
6563bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
6564  return true;
6565}
6566
6567extern "C" void LLVMInitializeARMAsmLexer();
6568
6569/// Force static initialization.
6570extern "C" void LLVMInitializeARMAsmParser() {
6571  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6572  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6573  LLVMInitializeARMAsmLexer();
6574}
6575
6576#define GET_REGISTER_MATCHER
6577#define GET_MATCHER_IMPLEMENTATION
6578#include "ARMGenAsmMatcher.inc"
6579