ARMAsmParser.cpp revision 18c8d12dea944086ef0ce2f674ca8a34de2bbd74
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registered via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Read starting at the first 1 (from lsb):
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in the IT block is
58                              // 4 - trailingzeroes(mask).
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // first instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
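  // For illustration, following the convention documented above: a bare
  // "IT" has Mask == 0b1000 (three trailing zeros, so 4 - 3 == 1
  // instruction), a two-instruction "ITx" has a mask of the form ?100, and
  // a full "ITxyz" one of the form ???1. The bits above the lowest set bit
  // record '1' (then) or '0' (else) for each instruction after the first,
  // and forwardITPosition() retires the block once CurPosition reaches
  // 5 minus the number of trailing zeros in Mask.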
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an ITBlock to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_FPImmediate,
274    k_MemBarrierOpt,
275    k_Memory,
276    k_PostIndexRegister,
277    k_MSRMask,
278    k_ProcIFlags,
279    k_VectorIndex,
280    k_Register,
281    k_RegisterList,
282    k_DPRRegisterList,
283    k_SPRRegisterList,
284    k_VectorList,
285    k_VectorListAllLanes,
286    k_VectorListIndexed,
287    k_ShiftedRegister,
288    k_ShiftedImmediate,
289    k_ShifterImmediate,
290    k_RotateImmediate,
291    k_BitfieldDescriptor,
292    k_Token
293  } Kind;
294
295  SMLoc StartLoc, EndLoc;
296  SmallVector<unsigned, 8> Registers;
297
298  union {
299    struct {
300      ARMCC::CondCodes Val;
301    } CC;
302
303    struct {
304      unsigned Val;
305    } Cop;
306
307    struct {
308      unsigned Val;
309    } CoprocOption;
310
311    struct {
312      unsigned Mask:4;
313    } ITMask;
314
315    struct {
316      ARM_MB::MemBOpt Val;
317    } MBOpt;
318
319    struct {
320      ARM_PROC::IFlags Val;
321    } IFlags;
322
323    struct {
324      unsigned Val;
325    } MMask;
326
327    struct {
328      const char *Data;
329      unsigned Length;
330    } Tok;
331
332    struct {
333      unsigned RegNum;
334    } Reg;
335
336    // A vector register list is a sequential list of 1 to 4 registers.
337    struct {
338      unsigned RegNum;
339      unsigned Count;
340      unsigned LaneIndex;
341      bool isDoubleSpaced;
342    } VectorList;
343
344    struct {
345      unsigned Val;
346    } VectorIndex;
347
348    struct {
349      const MCExpr *Val;
350    } Imm;
351
352    struct {
353      unsigned Val;       // encoded 8-bit representation
354    } FPImm;
355
356    /// Combined record for all forms of ARM address expressions.
357    struct {
358      unsigned BaseRegNum;
359      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
360      // was specified.
361      const MCConstantExpr *OffsetImm;  // Offset immediate value
362      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
363      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
364      unsigned ShiftImm;        // shift for OffsetReg.
365      unsigned Alignment;       // 0 = no alignment specified
366                                // n = alignment in bytes (2, 4, 8, 16, or 32)
367      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
368    } Memory;
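    // For illustration only: an operand such as "[r2, r3, lsl #2]" would
    // populate this record roughly as BaseRegNum = R2, OffsetRegNum = R3,
    // OffsetImm = 0, ShiftType = lsl, ShiftImm = 2, Alignment = 0 and
    // isNegative = 0, while "[r2, #-8]" would leave OffsetRegNum at 0 and
    // carry the sign in OffsetImm itself.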
369
370    struct {
371      unsigned RegNum;
372      bool isAdd;
373      ARM_AM::ShiftOpc ShiftTy;
374      unsigned ShiftImm;
375    } PostIdxReg;
376
377    struct {
378      bool isASR;
379      unsigned Imm;
380    } ShifterImm;
381    struct {
382      ARM_AM::ShiftOpc ShiftTy;
383      unsigned SrcReg;
384      unsigned ShiftReg;
385      unsigned ShiftImm;
386    } RegShiftedReg;
387    struct {
388      ARM_AM::ShiftOpc ShiftTy;
389      unsigned SrcReg;
390      unsigned ShiftImm;
391    } RegShiftedImm;
392    struct {
393      unsigned Imm;
394    } RotImm;
395    struct {
396      unsigned LSB;
397      unsigned Width;
398    } Bitfield;
399  };
400
401  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
402public:
403  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
404    Kind = o.Kind;
405    StartLoc = o.StartLoc;
406    EndLoc = o.EndLoc;
407    switch (Kind) {
408    case k_CondCode:
409      CC = o.CC;
410      break;
411    case k_ITCondMask:
412      ITMask = o.ITMask;
413      break;
414    case k_Token:
415      Tok = o.Tok;
416      break;
417    case k_CCOut:
418    case k_Register:
419      Reg = o.Reg;
420      break;
421    case k_RegisterList:
422    case k_DPRRegisterList:
423    case k_SPRRegisterList:
424      Registers = o.Registers;
425      break;
426    case k_VectorList:
427    case k_VectorListAllLanes:
428    case k_VectorListIndexed:
429      VectorList = o.VectorList;
430      break;
431    case k_CoprocNum:
432    case k_CoprocReg:
433      Cop = o.Cop;
434      break;
435    case k_CoprocOption:
436      CoprocOption = o.CoprocOption;
437      break;
438    case k_Immediate:
439      Imm = o.Imm;
440      break;
441    case k_FPImmediate:
442      FPImm = o.FPImm;
443      break;
444    case k_MemBarrierOpt:
445      MBOpt = o.MBOpt;
446      break;
447    case k_Memory:
448      Memory = o.Memory;
449      break;
450    case k_PostIndexRegister:
451      PostIdxReg = o.PostIdxReg;
452      break;
453    case k_MSRMask:
454      MMask = o.MMask;
455      break;
456    case k_ProcIFlags:
457      IFlags = o.IFlags;
458      break;
459    case k_ShifterImmediate:
460      ShifterImm = o.ShifterImm;
461      break;
462    case k_ShiftedRegister:
463      RegShiftedReg = o.RegShiftedReg;
464      break;
465    case k_ShiftedImmediate:
466      RegShiftedImm = o.RegShiftedImm;
467      break;
468    case k_RotateImmediate:
469      RotImm = o.RotImm;
470      break;
471    case k_BitfieldDescriptor:
472      Bitfield = o.Bitfield;
473      break;
474    case k_VectorIndex:
475      VectorIndex = o.VectorIndex;
476      break;
477    }
478  }
479
480  /// getStartLoc - Get the location of the first token of this operand.
481  SMLoc getStartLoc() const { return StartLoc; }
482  /// getEndLoc - Get the location of the last token of this operand.
483  SMLoc getEndLoc() const { return EndLoc; }
484
485  ARMCC::CondCodes getCondCode() const {
486    assert(Kind == k_CondCode && "Invalid access!");
487    return CC.Val;
488  }
489
490  unsigned getCoproc() const {
491    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
492    return Cop.Val;
493  }
494
495  StringRef getToken() const {
496    assert(Kind == k_Token && "Invalid access!");
497    return StringRef(Tok.Data, Tok.Length);
498  }
499
500  unsigned getReg() const {
501    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
502    return Reg.RegNum;
503  }
504
505  const SmallVectorImpl<unsigned> &getRegList() const {
506    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
507            Kind == k_SPRRegisterList) && "Invalid access!");
508    return Registers;
509  }
510
511  const MCExpr *getImm() const {
512    assert(Kind == k_Immediate && "Invalid access!");
513    return Imm.Val;
514  }
515
516  unsigned getFPImm() const {
517    assert(Kind == k_FPImmediate && "Invalid access!");
518    return FPImm.Val;
519  }
520
521  unsigned getVectorIndex() const {
522    assert(Kind == k_VectorIndex && "Invalid access!");
523    return VectorIndex.Val;
524  }
525
526  ARM_MB::MemBOpt getMemBarrierOpt() const {
527    assert(Kind == k_MemBarrierOpt && "Invalid access!");
528    return MBOpt.Val;
529  }
530
531  ARM_PROC::IFlags getProcIFlags() const {
532    assert(Kind == k_ProcIFlags && "Invalid access!");
533    return IFlags.Val;
534  }
535
536  unsigned getMSRMask() const {
537    assert(Kind == k_MSRMask && "Invalid access!");
538    return MMask.Val;
539  }
540
541  bool isCoprocNum() const { return Kind == k_CoprocNum; }
542  bool isCoprocReg() const { return Kind == k_CoprocReg; }
543  bool isCoprocOption() const { return Kind == k_CoprocOption; }
544  bool isCondCode() const { return Kind == k_CondCode; }
545  bool isCCOut() const { return Kind == k_CCOut; }
546  bool isITMask() const { return Kind == k_ITCondMask; }
547  bool isITCondCode() const { return Kind == k_CondCode; }
548  bool isImm() const { return Kind == k_Immediate; }
549  bool isFPImm() const { return Kind == k_FPImmediate; }
550  bool isImm8s4() const {
551    if (Kind != k_Immediate)
552      return false;
553    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
554    if (!CE) return false;
555    int64_t Value = CE->getValue();
556    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
557  }
558  bool isImm0_1020s4() const {
559    if (Kind != k_Immediate)
560      return false;
561    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
562    if (!CE) return false;
563    int64_t Value = CE->getValue();
564    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
565  }
566  bool isImm0_508s4() const {
567    if (Kind != k_Immediate)
568      return false;
569    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
570    if (!CE) return false;
571    int64_t Value = CE->getValue();
572    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
573  }
574  bool isImm0_255() const {
575    if (Kind != k_Immediate)
576      return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return Value >= 0 && Value < 256;
581  }
582  bool isImm0_1() const {
583    if (Kind != k_Immediate)
584      return false;
585    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
586    if (!CE) return false;
587    int64_t Value = CE->getValue();
588    return Value >= 0 && Value < 2;
589  }
590  bool isImm0_3() const {
591    if (Kind != k_Immediate)
592      return false;
593    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
594    if (!CE) return false;
595    int64_t Value = CE->getValue();
596    return Value >= 0 && Value < 4;
597  }
598  bool isImm0_7() const {
599    if (Kind != k_Immediate)
600      return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 8;
605  }
606  bool isImm0_15() const {
607    if (Kind != k_Immediate)
608      return false;
609    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
610    if (!CE) return false;
611    int64_t Value = CE->getValue();
612    return Value >= 0 && Value < 16;
613  }
614  bool isImm0_31() const {
615    if (Kind != k_Immediate)
616      return false;
617    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
618    if (!CE) return false;
619    int64_t Value = CE->getValue();
620    return Value >= 0 && Value < 32;
621  }
622  bool isImm0_63() const {
623    if (Kind != k_Immediate)
624      return false;
625    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
626    if (!CE) return false;
627    int64_t Value = CE->getValue();
628    return Value >= 0 && Value < 64;
629  }
630  bool isImm8() const {
631    if (Kind != k_Immediate)
632      return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 8;
637  }
638  bool isImm16() const {
639    if (Kind != k_Immediate)
640      return false;
641    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
642    if (!CE) return false;
643    int64_t Value = CE->getValue();
644    return Value == 16;
645  }
646  bool isImm32() const {
647    if (Kind != k_Immediate)
648      return false;
649    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
650    if (!CE) return false;
651    int64_t Value = CE->getValue();
652    return Value == 32;
653  }
654  bool isShrImm8() const {
655    if (Kind != k_Immediate)
656      return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 8;
661  }
662  bool isShrImm16() const {
663    if (Kind != k_Immediate)
664      return false;
665    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
666    if (!CE) return false;
667    int64_t Value = CE->getValue();
668    return Value > 0 && Value <= 16;
669  }
670  bool isShrImm32() const {
671    if (Kind != k_Immediate)
672      return false;
673    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
674    if (!CE) return false;
675    int64_t Value = CE->getValue();
676    return Value > 0 && Value <= 32;
677  }
678  bool isShrImm64() const {
679    if (Kind != k_Immediate)
680      return false;
681    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
682    if (!CE) return false;
683    int64_t Value = CE->getValue();
684    return Value > 0 && Value <= 64;
685  }
686  bool isImm1_7() const {
687    if (Kind != k_Immediate)
688      return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value > 0 && Value < 8;
693  }
694  bool isImm1_15() const {
695    if (Kind != k_Immediate)
696      return false;
697    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
698    if (!CE) return false;
699    int64_t Value = CE->getValue();
700    return Value > 0 && Value < 16;
701  }
702  bool isImm1_31() const {
703    if (Kind != k_Immediate)
704      return false;
705    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
706    if (!CE) return false;
707    int64_t Value = CE->getValue();
708    return Value > 0 && Value < 32;
709  }
710  bool isImm1_16() const {
711    if (Kind != k_Immediate)
712      return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value > 0 && Value < 17;
717  }
718  bool isImm1_32() const {
719    if (Kind != k_Immediate)
720      return false;
721    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
722    if (!CE) return false;
723    int64_t Value = CE->getValue();
724    return Value > 0 && Value < 33;
725  }
726  bool isImm0_32() const {
727    if (Kind != k_Immediate)
728      return false;
729    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
730    if (!CE) return false;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value < 33;
733  }
734  bool isImm0_65535() const {
735    if (Kind != k_Immediate)
736      return false;
737    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
738    if (!CE) return false;
739    int64_t Value = CE->getValue();
740    return Value >= 0 && Value < 65536;
741  }
742  bool isImm0_65535Expr() const {
743    if (Kind != k_Immediate)
744      return false;
745    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
746    // If it's not a constant expression, it'll generate a fixup and be
747    // handled later.
748    if (!CE) return true;
749    int64_t Value = CE->getValue();
750    return Value >= 0 && Value < 65536;
751  }
752  bool isImm24bit() const {
753    if (Kind != k_Immediate)
754      return false;
755    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
756    if (!CE) return false;
757    int64_t Value = CE->getValue();
758    return Value >= 0 && Value <= 0xffffff;
759  }
760  bool isImmThumbSR() const {
761    if (Kind != k_Immediate)
762      return false;
763    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764    if (!CE) return false;
765    int64_t Value = CE->getValue();
766    return Value > 0 && Value < 33;
767  }
768  bool isPKHLSLImm() const {
769    if (Kind != k_Immediate)
770      return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return Value >= 0 && Value < 32;
775  }
776  bool isPKHASRImm() const {
777    if (Kind != k_Immediate)
778      return false;
779    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
780    if (!CE) return false;
781    int64_t Value = CE->getValue();
782    return Value > 0 && Value <= 32;
783  }
784  bool isARMSOImm() const {
785    if (Kind != k_Immediate)
786      return false;
787    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
788    if (!CE) return false;
789    int64_t Value = CE->getValue();
790    return ARM_AM::getSOImmVal(Value) != -1;
791  }
792  bool isARMSOImmNot() const {
793    if (Kind != k_Immediate)
794      return false;
795    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
796    if (!CE) return false;
797    int64_t Value = CE->getValue();
798    return ARM_AM::getSOImmVal(~Value) != -1;
799  }
800  bool isARMSOImmNeg() const {
801    if (Kind != k_Immediate)
802      return false;
803    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
804    if (!CE) return false;
805    int64_t Value = CE->getValue();
806    return ARM_AM::getSOImmVal(-Value) != -1;
807  }
808  bool isT2SOImm() const {
809    if (Kind != k_Immediate)
810      return false;
811    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
812    if (!CE) return false;
813    int64_t Value = CE->getValue();
814    return ARM_AM::getT2SOImmVal(Value) != -1;
815  }
816  bool isT2SOImmNot() const {
817    if (Kind != k_Immediate)
818      return false;
819    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
820    if (!CE) return false;
821    int64_t Value = CE->getValue();
822    return ARM_AM::getT2SOImmVal(~Value) != -1;
823  }
824  bool isT2SOImmNeg() const {
825    if (Kind != k_Immediate)
826      return false;
827    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828    if (!CE) return false;
829    int64_t Value = CE->getValue();
830    return ARM_AM::getT2SOImmVal(-Value) != -1;
831  }
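  // Informally, for the *SOImm* predicates above: an ARM so_imm ("modified
  // immediate") is an 8-bit value rotated right by an even amount, and a
  // Thumb-2 t2_so_imm additionally allows replicated-byte forms such as
  // 0x00XY00XY and 0xXYXYXYXY. The *Not and *Neg variants accept a value
  // whose bitwise complement or arithmetic negation, respectively, fits one
  // of those encodings, as the uses of getSOImmVal/getT2SOImmVal show.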
832  bool isSetEndImm() const {
833    if (Kind != k_Immediate)
834      return false;
835    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836    if (!CE) return false;
837    int64_t Value = CE->getValue();
838    return Value == 1 || Value == 0;
839  }
840  bool isReg() const { return Kind == k_Register; }
841  bool isRegList() const { return Kind == k_RegisterList; }
842  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
843  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
844  bool isToken() const { return Kind == k_Token; }
845  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
846  bool isMemory() const { return Kind == k_Memory; }
847  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
848  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
849  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
850  bool isRotImm() const { return Kind == k_RotateImmediate; }
851  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
852  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
853  bool isPostIdxReg() const {
854    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
855  }
856  bool isMemNoOffset(bool alignOK = false) const {
857    if (!isMemory())
858      return false;
859    // No offset of any kind.
860    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
861     (alignOK || Memory.Alignment == 0);
862  }
863  bool isAlignedMemory() const {
864    return isMemNoOffset(true);
865  }
866  bool isAddrMode2() const {
867    if (!isMemory() || Memory.Alignment != 0) return false;
868    // Check for register offset.
869    if (Memory.OffsetRegNum) return true;
870    // Immediate offset in range [-4095, 4095].
871    if (!Memory.OffsetImm) return true;
872    int64_t Val = Memory.OffsetImm->getValue();
873    return Val > -4096 && Val < 4096;
874  }
875  bool isAM2OffsetImm() const {
876    if (Kind != k_Immediate)
877      return false;
878    // Immediate offset in range [-4095, 4095].
879    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
880    if (!CE) return false;
881    int64_t Val = CE->getValue();
882    return Val > -4096 && Val < 4096;
883  }
884  bool isAddrMode3() const {
885    // If we have an immediate that's not a constant, treat it as a label
886    // reference needing a fixup. If it is a constant, it's something else
887    // and we reject it.
888    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
889      return true;
890    if (!isMemory() || Memory.Alignment != 0) return false;
891    // No shifts are legal for AM3.
892    if (Memory.ShiftType != ARM_AM::no_shift) return false;
893    // Check for register offset.
894    if (Memory.OffsetRegNum) return true;
895    // Immediate offset in range [-255, 255].
896    if (!Memory.OffsetImm) return true;
897    int64_t Val = Memory.OffsetImm->getValue();
898    return Val > -256 && Val < 256;
899  }
900  bool isAM3Offset() const {
901    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
902      return false;
903    if (Kind == k_PostIndexRegister)
904      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
905    // Immediate offset in range [-255, 255].
906    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
907    if (!CE) return false;
908    int64_t Val = CE->getValue();
909    // Special case, #-0 is INT32_MIN.
910    return (Val > -256 && Val < 256) || Val == INT32_MIN;
911  }
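  // Note on the INT32_MIN special cases here and below: as the comment
  // above says, a literal "#-0" offset is represented as INT32_MIN so that
  // the subtract-zero form is preserved; the addAddrMode*Operands methods
  // later turn it back into an offset of 0 with the subtract flag set.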
912  bool isAddrMode5() const {
913    // If we have an immediate that's not a constant, treat it as a label
914    // reference needing a fixup. If it is a constant, it's something else
915    // and we reject it.
916    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
917      return true;
918    if (!isMemory() || Memory.Alignment != 0) return false;
919    // Check for register offset.
920    if (Memory.OffsetRegNum) return false;
921    // Immediate offset in range [-1020, 1020] and a multiple of 4.
922    if (!Memory.OffsetImm) return true;
923    int64_t Val = Memory.OffsetImm->getValue();
924    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
925      Val == INT32_MIN;
926  }
927  bool isMemTBB() const {
928    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
929        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
930      return false;
931    return true;
932  }
933  bool isMemTBH() const {
934    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
935        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
936        Memory.Alignment != 0 )
937      return false;
938    return true;
939  }
940  bool isMemRegOffset() const {
941    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
942      return false;
943    return true;
944  }
945  bool isT2MemRegOffset() const {
946    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
947        Memory.Alignment != 0)
948      return false;
949    // Only lsl #{0, 1, 2, 3} allowed.
950    if (Memory.ShiftType == ARM_AM::no_shift)
951      return true;
952    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
953      return false;
954    return true;
955  }
956  bool isMemThumbRR() const {
957    // Thumb reg+reg addressing is simple. Just two registers, a base and
958    // an offset. No shifts, negations or any other complicating factors.
959    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
960        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
961      return false;
962    return isARMLowRegister(Memory.BaseRegNum) &&
963      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
964  }
965  bool isMemThumbRIs4() const {
966    if (!isMemory() || Memory.OffsetRegNum != 0 ||
967        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
968      return false;
969    // Immediate offset, multiple of 4 in range [0, 124].
970    if (!Memory.OffsetImm) return true;
971    int64_t Val = Memory.OffsetImm->getValue();
972    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
973  }
974  bool isMemThumbRIs2() const {
975    if (!isMemory() || Memory.OffsetRegNum != 0 ||
976        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
977      return false;
978    // Immediate offset, multiple of 2 in range [0, 62].
979    if (!Memory.OffsetImm) return true;
980    int64_t Val = Memory.OffsetImm->getValue();
981    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
982  }
983  bool isMemThumbRIs1() const {
984    if (!isMemory() || Memory.OffsetRegNum != 0 ||
985        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
986      return false;
987    // Immediate offset in range [0, 31].
988    if (!Memory.OffsetImm) return true;
989    int64_t Val = Memory.OffsetImm->getValue();
990    return Val >= 0 && Val <= 31;
991  }
992  bool isMemThumbSPI() const {
993    if (!isMemory() || Memory.OffsetRegNum != 0 ||
994        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
995      return false;
996    // Immediate offset, multiple of 4 in range [0, 1020].
997    if (!Memory.OffsetImm) return true;
998    int64_t Val = Memory.OffsetImm->getValue();
999    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1000  }
1001  bool isMemImm8s4Offset() const {
1002    // If we have an immediate that's not a constant, treat it as a label
1003    // reference needing a fixup. If it is a constant, it's something else
1004    // and we reject it.
1005    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1006      return true;
1007    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1008      return false;
1009    // Immediate offset a multiple of 4 in range [-1020, 1020].
1010    if (!Memory.OffsetImm) return true;
1011    int64_t Val = Memory.OffsetImm->getValue();
1012    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1013  }
1014  bool isMemImm0_1020s4Offset() const {
1015    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1016      return false;
1017    // Immediate offset a multiple of 4 in range [0, 1020].
1018    if (!Memory.OffsetImm) return true;
1019    int64_t Val = Memory.OffsetImm->getValue();
1020    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1021  }
1022  bool isMemImm8Offset() const {
1023    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1024      return false;
1025    // Immediate offset in range [-255, 255].
1026    if (!Memory.OffsetImm) return true;
1027    int64_t Val = Memory.OffsetImm->getValue();
1028    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1029  }
1030  bool isMemPosImm8Offset() const {
1031    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1032      return false;
1033    // Immediate offset in range [0, 255].
1034    if (!Memory.OffsetImm) return true;
1035    int64_t Val = Memory.OffsetImm->getValue();
1036    return Val >= 0 && Val < 256;
1037  }
1038  bool isMemNegImm8Offset() const {
1039    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1040      return false;
1041    // Immediate offset in range [-255, -1].
1042    if (!Memory.OffsetImm) return false;
1043    int64_t Val = Memory.OffsetImm->getValue();
1044    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1045  }
1046  bool isMemUImm12Offset() const {
1047    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1048      return false;
1049    // Immediate offset in range [0, 4095].
1050    if (!Memory.OffsetImm) return true;
1051    int64_t Val = Memory.OffsetImm->getValue();
1052    return (Val >= 0 && Val < 4096);
1053  }
1054  bool isMemImm12Offset() const {
1055    // If we have an immediate that's not a constant, treat it as a label
1056    // reference needing a fixup. If it is a constant, it's something else
1057    // and we reject it.
1058    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1059      return true;
1060
1061    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1062      return false;
1063    // Immediate offset in range [-4095, 4095].
1064    if (!Memory.OffsetImm) return true;
1065    int64_t Val = Memory.OffsetImm->getValue();
1066    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1067  }
1068  bool isPostIdxImm8() const {
1069    if (Kind != k_Immediate)
1070      return false;
1071    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1072    if (!CE) return false;
1073    int64_t Val = CE->getValue();
1074    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1075  }
1076  bool isPostIdxImm8s4() const {
1077    if (Kind != k_Immediate)
1078      return false;
1079    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1080    if (!CE) return false;
1081    int64_t Val = CE->getValue();
1082    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1083      (Val == INT32_MIN);
1084  }
1085
1086  bool isMSRMask() const { return Kind == k_MSRMask; }
1087  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1088
1089  // NEON operands.
1090  bool isSingleSpacedVectorList() const {
1091    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1092  }
1093  bool isDoubleSpacedVectorList() const {
1094    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1095  }
1096  bool isVecListOneD() const {
1097    if (!isSingleSpacedVectorList()) return false;
1098    return VectorList.Count == 1;
1099  }
1100
1101  bool isVecListTwoD() const {
1102    if (!isSingleSpacedVectorList()) return false;
1103    return VectorList.Count == 2;
1104  }
1105
1106  bool isVecListThreeD() const {
1107    if (!isSingleSpacedVectorList()) return false;
1108    return VectorList.Count == 3;
1109  }
1110
1111  bool isVecListFourD() const {
1112    if (!isSingleSpacedVectorList()) return false;
1113    return VectorList.Count == 4;
1114  }
1115
1116  bool isVecListTwoQ() const {
1117    if (!isDoubleSpacedVectorList()) return false;
1118    return VectorList.Count == 2;
1119  }
1120
1121  bool isSingleSpacedVectorAllLanes() const {
1122    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1123  }
1124  bool isDoubleSpacedVectorAllLanes() const {
1125    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1126  }
1127  bool isVecListOneDAllLanes() const {
1128    if (!isSingleSpacedVectorAllLanes()) return false;
1129    return VectorList.Count == 1;
1130  }
1131
1132  bool isVecListTwoDAllLanes() const {
1133    if (!isSingleSpacedVectorAllLanes()) return false;
1134    return VectorList.Count == 2;
1135  }
1136
1137  bool isVecListTwoQAllLanes() const {
1138    if (!isDoubleSpacedVectorAllLanes()) return false;
1139    return VectorList.Count == 2;
1140  }
1141
1142  bool isSingleSpacedVectorIndexed() const {
1143    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1144  }
1145  bool isDoubleSpacedVectorIndexed() const {
1146    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1147  }
1148  bool isVecListOneDByteIndexed() const {
1149    if (!isSingleSpacedVectorIndexed()) return false;
1150    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1151  }
1152
1153  bool isVecListOneDHWordIndexed() const {
1154    if (!isSingleSpacedVectorIndexed()) return false;
1155    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1156  }
1157
1158  bool isVecListOneDWordIndexed() const {
1159    if (!isSingleSpacedVectorIndexed()) return false;
1160    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1161  }
1162
1163  bool isVecListTwoDByteIndexed() const {
1164    if (!isSingleSpacedVectorIndexed()) return false;
1165    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1166  }
1167
1168  bool isVecListTwoDHWordIndexed() const {
1169    if (!isSingleSpacedVectorIndexed()) return false;
1170    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1171  }
1172
1173  bool isVecListTwoQWordIndexed() const {
1174    if (!isDoubleSpacedVectorIndexed()) return false;
1175    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1176  }
1177
1178  bool isVecListTwoQHWordIndexed() const {
1179    if (!isDoubleSpacedVectorIndexed()) return false;
1180    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1181  }
1182
1183  bool isVecListTwoDWordIndexed() const {
1184    if (!isSingleSpacedVectorIndexed()) return false;
1185    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1186  }
1187
1188  bool isVectorIndex8() const {
1189    if (Kind != k_VectorIndex) return false;
1190    return VectorIndex.Val < 8;
1191  }
1192  bool isVectorIndex16() const {
1193    if (Kind != k_VectorIndex) return false;
1194    return VectorIndex.Val < 4;
1195  }
1196  bool isVectorIndex32() const {
1197    if (Kind != k_VectorIndex) return false;
1198    return VectorIndex.Val < 2;
1199  }
1200
1201  bool isNEONi8splat() const {
1202    if (Kind != k_Immediate)
1203      return false;
1204    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1205    // Must be a constant.
1206    if (!CE) return false;
1207    int64_t Value = CE->getValue();
1208    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1209    // value.
1210    return Value >= 0 && Value < 256;
1211  }
1212
1213  bool isNEONi16splat() const {
1214    if (Kind != k_Immediate)
1215      return false;
1216    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1217    // Must be a constant.
1218    if (!CE) return false;
1219    int64_t Value = CE->getValue();
1220    // i16 value in the range [0,255] or [0x0100, 0xff00]
1221    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1222  }
1223
1224  bool isNEONi32splat() const {
1225    if (Kind != k_Immediate)
1226      return false;
1227    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1228    // Must be a constant.
1229    if (!CE) return false;
1230    int64_t Value = CE->getValue();
1231    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1232    return (Value >= 0 && Value < 256) ||
1233      (Value >= 0x0100 && Value <= 0xff00) ||
1234      (Value >= 0x010000 && Value <= 0xff0000) ||
1235      (Value >= 0x01000000 && Value <= 0xff000000);
1236  }
1237
1238  bool isNEONi32vmov() const {
1239    if (Kind != k_Immediate)
1240      return false;
1241    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1242    // Must be a constant.
1243    if (!CE) return false;
1244    int64_t Value = CE->getValue();
1245    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
1246    // for VMOV/VMVN only, the forms 0x0000XXFF and 0x00XXFFFF also match.
1247    return (Value >= 0 && Value < 256) ||
1248      (Value >= 0x0100 && Value <= 0xff00) ||
1249      (Value >= 0x010000 && Value <= 0xff0000) ||
1250      (Value >= 0x01000000 && Value <= 0xff000000) ||
1251      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1252      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1253  }
1254  bool isNEONi32vmovNeg() const {
1255    if (Kind != k_Immediate)
1256      return false;
1257    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1258    // Must be a constant.
1259    if (!CE) return false;
1260    int64_t Value = ~CE->getValue();
1261    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
1262    // for VMOV/VMVN only, the forms 0x0000XXFF and 0x00XXFFFF also match.
1263    return (Value >= 0 && Value < 256) ||
1264      (Value >= 0x0100 && Value <= 0xff00) ||
1265      (Value >= 0x010000 && Value <= 0xff0000) ||
1266      (Value >= 0x01000000 && Value <= 0xff000000) ||
1267      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1268      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1269  }
1270
1271  bool isNEONi64splat() const {
1272    if (Kind != k_Immediate)
1273      return false;
1274    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1275    // Must be a constant.
1276    if (!CE) return false;
1277    uint64_t Value = CE->getValue();
1278    // i64 value with each byte being either 0 or 0xff.
1279    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1280      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1281    return true;
1282  }
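  // For example, 0x00FF00FF00FF00FF and 0xFF00000000000000 satisfy the
  // per-byte check above, while 0x0102030405060708 does not.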
1283
1284  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1285    // Add as immediates when possible.  Null MCExpr = 0.
1286    if (Expr == 0)
1287      Inst.addOperand(MCOperand::CreateImm(0));
1288    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1289      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1290    else
1291      Inst.addOperand(MCOperand::CreateExpr(Expr));
1292  }
1293
1294  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1295    assert(N == 2 && "Invalid number of operands!");
1296    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1297    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1298    Inst.addOperand(MCOperand::CreateReg(RegNum));
1299  }
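  // For illustration: a predicated mnemonic such as "addeq" adds the
  // immediate ARMCC::EQ followed by CPSR as the condition register, while
  // the default "al" condition adds ARMCC::AL followed by register 0.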
1300
1301  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1302    assert(N == 1 && "Invalid number of operands!");
1303    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1304  }
1305
1306  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1307    assert(N == 1 && "Invalid number of operands!");
1308    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1309  }
1310
1311  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1312    assert(N == 1 && "Invalid number of operands!");
1313    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1314  }
1315
1316  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1317    assert(N == 1 && "Invalid number of operands!");
1318    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1319  }
1320
1321  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1322    assert(N == 1 && "Invalid number of operands!");
1323    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1324  }
1325
1326  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1327    assert(N == 1 && "Invalid number of operands!");
1328    Inst.addOperand(MCOperand::CreateReg(getReg()));
1329  }
1330
1331  void addRegOperands(MCInst &Inst, unsigned N) const {
1332    assert(N == 1 && "Invalid number of operands!");
1333    Inst.addOperand(MCOperand::CreateReg(getReg()));
1334  }
1335
1336  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1337    assert(N == 3 && "Invalid number of operands!");
1338    assert(isRegShiftedReg() &&
1339           "addRegShiftedRegOperands() on non RegShiftedReg!");
1340    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1341    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1342    Inst.addOperand(MCOperand::CreateImm(
1343      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1344  }
1345
1346  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1347    assert(N == 2 && "Invalid number of operands!");
1348    assert(isRegShiftedImm() &&
1349           "addRegShiftedImmOperands() on non RegShiftedImm!");
1350    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1351    Inst.addOperand(MCOperand::CreateImm(
1352      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1353  }
1354
1355  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1356    assert(N == 1 && "Invalid number of operands!");
1357    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1358                                         ShifterImm.Imm));
1359  }
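  // For illustration of the packing above: "lsl #3" encodes as 3 and
  // "asr #3" as (1 << 5) | 3 == 35; bit 5 simply distinguishes the two
  // shift kinds this operand can carry.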
1360
1361  void addRegListOperands(MCInst &Inst, unsigned N) const {
1362    assert(N == 1 && "Invalid number of operands!");
1363    const SmallVectorImpl<unsigned> &RegList = getRegList();
1364    for (SmallVectorImpl<unsigned>::const_iterator
1365           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1366      Inst.addOperand(MCOperand::CreateReg(*I));
1367  }
1368
1369  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1370    addRegListOperands(Inst, N);
1371  }
1372
1373  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1374    addRegListOperands(Inst, N);
1375  }
1376
1377  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1378    assert(N == 1 && "Invalid number of operands!");
1379    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1380    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1381  }
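  // For illustration: the rotations this operand accepts are 0, 8, 16 and
  // 24, so the value stored in the MCInst is just 0, 1, 2 or 3.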
1382
1383  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1384    assert(N == 1 && "Invalid number of operands!");
1385    // Munge the lsb/width into a bitfield mask.
1386    unsigned lsb = Bitfield.LSB;
1387    unsigned width = Bitfield.Width;
1388    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1389    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1390                      (32 - (lsb + width)));
1391    Inst.addOperand(MCOperand::CreateImm(Mask));
1392  }
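  // Worked example: BFC/BFI with lsb = 8, width = 8 gives
  // ~((0xffffffff >> 8) << 24 >> 16) == ~0x0000ff00 == 0xffff00ff, i.e. a
  // mask with only the referenced bit range cleared.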
1393
1394  void addImmOperands(MCInst &Inst, unsigned N) const {
1395    assert(N == 1 && "Invalid number of operands!");
1396    addExpr(Inst, getImm());
1397  }
1398
1399  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1400    assert(N == 1 && "Invalid number of operands!");
1401    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1402  }
1403
1404  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1405    assert(N == 1 && "Invalid number of operands!");
1406    // FIXME: We really want to scale the value here, but the LDRD/STRD
1407    // instructions don't encode operands that way yet.
1408    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1409    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1410  }
1411
1412  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1413    assert(N == 1 && "Invalid number of operands!");
1414    // The immediate is scaled by four in the encoding and is stored
1415    // in the MCInst as such. Lop off the low two bits here.
1416    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1417    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1418  }
1419
1420  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1421    assert(N == 1 && "Invalid number of operands!");
1422    // The immediate is scaled by four in the encoding and is stored
1423    // in the MCInst as such. Lop off the low two bits here.
1424    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1425    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1426  }
1427
1428  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1429    assert(N == 1 && "Invalid number of operands!");
1430    // The constant encodes as the immediate-1, and we store in the instruction
1431    // the bits as encoded, so subtract off one here.
1432    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1433    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1434  }
1435
1436  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1437    assert(N == 1 && "Invalid number of operands!");
1438    // The constant encodes as the immediate-1, and we store in the instruction
1439    // the bits as encoded, so subtract off one here.
1440    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1441    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1442  }
1443
1444  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1445    assert(N == 1 && "Invalid number of operands!");
1446    // The constant encodes as the immediate, except for 32, which encodes as
1447    // zero.
1448    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1449    unsigned Imm = CE->getValue();
1450    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1451  }
1452
1453  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1454    assert(N == 1 && "Invalid number of operands!");
1455    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1456    // the instruction as well.
1457    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1458    int Val = CE->getValue();
1459    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1460  }
1461
1462  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1463    assert(N == 1 && "Invalid number of operands!");
1464    // The operand is actually a t2_so_imm, but we have its bitwise
1465    // negation in the assembly source, so twiddle it here.
1466    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1467    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1468  }
1469
1470  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1471    assert(N == 1 && "Invalid number of operands!");
1472    // The operand is actually a t2_so_imm, but we have its
1473    // negation in the assembly source, so twiddle it here.
1474    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1475    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1476  }
1477
1478  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1479    assert(N == 1 && "Invalid number of operands!");
1480    // The operand is actually a so_imm, but we have its bitwise
1481    // negation in the assembly source, so twiddle it here.
1482    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1483    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1484  }
1485
1486  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1487    assert(N == 1 && "Invalid number of operands!");
1488    // The operand is actually a so_imm, but we have its
1489    // negation in the assembly source, so twiddle it here.
1490    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1491    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1492  }
1493
1494  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1495    assert(N == 1 && "Invalid number of operands!");
1496    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1497  }
1498
1499  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1500    assert(N == 1 && "Invalid number of operands!");
1501    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1502  }
1503
1504  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1505    assert(N == 2 && "Invalid number of operands!");
1506    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1507    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1508  }
1509
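  // addAddrMode2Operands flattens an addrmode2 memory operand into the
  // three MCInst operands the encoding expects: base register, offset
  // register (zero if there is none), and a packed AM2 value. As a rough
  // sketch of the packing done by ARM_AM::getAM2Opc (ARMAddressingModes.h),
  // the 12-bit offset or shift amount sits in the low bits, with the
  // add/sub flag and the shift opcode folded in above it. E.g. "[r1, #-4]"
  // becomes { reg:r1, reg:0, imm:4 with the subtract flag set }.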
1510  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1511    assert(N == 3 && "Invalid number of operands!");
1512    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1513    if (!Memory.OffsetRegNum) {
1514      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1515      // Special case for #-0
1516      if (Val == INT32_MIN) Val = 0;
1517      if (Val < 0) Val = -Val;
1518      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1519    } else {
1520      // For register offset, we encode the shift type and negation flag
1521      // here.
1522      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1523                              Memory.ShiftImm, Memory.ShiftType);
1524    }
1525    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1526    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1527    Inst.addOperand(MCOperand::CreateImm(Val));
1528  }
1529
1530  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1531    assert(N == 2 && "Invalid number of operands!");
1532    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1533    assert(CE && "non-constant AM2OffsetImm operand!");
1534    int32_t Val = CE->getValue();
1535    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1536    // Special case for #-0
1537    if (Val == INT32_MIN) Val = 0;
1538    if (Val < 0) Val = -Val;
1539    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1540    Inst.addOperand(MCOperand::CreateReg(0));
1541    Inst.addOperand(MCOperand::CreateImm(Val));
1542  }
1543
1544  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1545    assert(N == 3 && "Invalid number of operands!");
1546    // If we have an immediate that's not a constant, treat it as a label
1547    // reference needing a fixup. If it is a constant, it's something else
1548    // and we reject it.
1549    if (isImm()) {
1550      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1551      Inst.addOperand(MCOperand::CreateReg(0));
1552      Inst.addOperand(MCOperand::CreateImm(0));
1553      return;
1554    }
1555
1556    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1557    if (!Memory.OffsetRegNum) {
1558      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1559      // Special case for #-0
1560      if (Val == INT32_MIN) Val = 0;
1561      if (Val < 0) Val = -Val;
1562      Val = ARM_AM::getAM3Opc(AddSub, Val);
1563    } else {
1564      // For register offset, we encode the shift type and negation flag
1565      // here.
1566      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1567    }
1568    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1569    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1570    Inst.addOperand(MCOperand::CreateImm(Val));
1571  }
1572
1573  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1574    assert(N == 2 && "Invalid number of operands!");
1575    if (Kind == k_PostIndexRegister) {
1576      int32_t Val =
1577        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1578      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1579      Inst.addOperand(MCOperand::CreateImm(Val));
1580      return;
1581    }
1582
1583    // Constant offset.
1584    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1585    int32_t Val = CE->getValue();
1586    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1587    // Special case for #-0
1588    if (Val == INT32_MIN) Val = 0;
1589    if (Val < 0) Val = -Val;
1590    Val = ARM_AM::getAM3Opc(AddSub, Val);
1591    Inst.addOperand(MCOperand::CreateReg(0));
1592    Inst.addOperand(MCOperand::CreateImm(Val));
1593  }
1594
1595  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1596    assert(N == 2 && "Invalid number of operands!");
1597    // If we have an immediate that's not a constant, treat it as a label
1598    // reference needing a fixup. If it is a constant, it's something else
1599    // and we reject it.
1600    if (isImm()) {
1601      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1602      Inst.addOperand(MCOperand::CreateImm(0));
1603      return;
1604    }
1605
1606    // The lower two bits are always zero and as such are not encoded.
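    // E.g. "[r2, #-1020]" becomes base r2 with a word offset of 255
    // (1020 / 4) and the subtract flag set in the AM5 value.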
1607    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1608    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1609    // Special case for #-0
1610    if (Val == INT32_MIN) Val = 0;
1611    if (Val < 0) Val = -Val;
1612    Val = ARM_AM::getAM5Opc(AddSub, Val);
1613    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1614    Inst.addOperand(MCOperand::CreateImm(Val));
1615  }
1616
1617  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1618    assert(N == 2 && "Invalid number of operands!");
1619    // If we have an immediate that's not a constant, treat it as a label
1620    // reference needing a fixup. If it is a constant, it's something else
1621    // and we reject it.
1622    if (isImm()) {
1623      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1624      Inst.addOperand(MCOperand::CreateImm(0));
1625      return;
1626    }
1627
1628    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1629    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1630    Inst.addOperand(MCOperand::CreateImm(Val));
1631  }
1632
1633  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1634    assert(N == 2 && "Invalid number of operands!");
1635    // The lower two bits are always zero and as such are not encoded.
1636    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1637    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1638    Inst.addOperand(MCOperand::CreateImm(Val));
1639  }
1640
1641  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1642    assert(N == 2 && "Invalid number of operands!");
1643    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1644    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1645    Inst.addOperand(MCOperand::CreateImm(Val));
1646  }
1647
1648  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1649    addMemImm8OffsetOperands(Inst, N);
1650  }
1651
1652  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1653    addMemImm8OffsetOperands(Inst, N);
1654  }
1655
1656  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1657    assert(N == 2 && "Invalid number of operands!");
1658    // If this is an immediate, it's a label reference.
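    // A bare symbol used where a "[reg, #imm]" address was expected arrives
    // here as the expression; it is emitted as-is with a zero offset so it
    // can be resolved later (e.g. by a fixup).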
1659    if (Kind == k_Immediate) {
1660      addExpr(Inst, getImm());
1661      Inst.addOperand(MCOperand::CreateImm(0));
1662      return;
1663    }
1664
1665    // Otherwise, it's a normal memory reg+offset.
1666    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1667    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1668    Inst.addOperand(MCOperand::CreateImm(Val));
1669  }
1670
1671  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1672    assert(N == 2 && "Invalid number of operands!");
1673    // If this is an immediate, it's a label reference.
1674    if (Kind == k_Immediate) {
1675      addExpr(Inst, getImm());
1676      Inst.addOperand(MCOperand::CreateImm(0));
1677      return;
1678    }
1679
1680    // Otherwise, it's a normal memory reg+offset.
1681    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1682    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1683    Inst.addOperand(MCOperand::CreateImm(Val));
1684  }
1685
1686  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1687    assert(N == 2 && "Invalid number of operands!");
1688    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1689    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1690  }
1691
1692  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1693    assert(N == 2 && "Invalid number of operands!");
1694    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1695    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1696  }
1697
1698  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1699    assert(N == 3 && "Invalid number of operands!");
1700    unsigned Val =
1701      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1702                        Memory.ShiftImm, Memory.ShiftType);
1703    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1704    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1705    Inst.addOperand(MCOperand::CreateImm(Val));
1706  }
1707
1708  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1709    assert(N == 3 && "Invalid number of operands!");
1710    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1711    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1712    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1713  }
1714
1715  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1716    assert(N == 2 && "Invalid number of operands!");
1717    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1718    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1719  }
1720
1721  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1722    assert(N == 2 && "Invalid number of operands!");
1723    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1724    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1725    Inst.addOperand(MCOperand::CreateImm(Val));
1726  }
1727
1728  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1729    assert(N == 2 && "Invalid number of operands!");
1730    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1731    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1732    Inst.addOperand(MCOperand::CreateImm(Val));
1733  }
1734
1735  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1736    assert(N == 2 && "Invalid number of operands!");
1737    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1738    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1739    Inst.addOperand(MCOperand::CreateImm(Val));
1740  }
1741
1742  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1743    assert(N == 2 && "Invalid number of operands!");
1744    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1745    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1746    Inst.addOperand(MCOperand::CreateImm(Val));
1747  }
1748
1749  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1750    assert(N == 1 && "Invalid number of operands!");
1751    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1752    assert(CE && "non-constant post-idx-imm8 operand!");
1753    int Imm = CE->getValue();
1754    bool isAdd = Imm >= 0;
1755    if (Imm == INT32_MIN) Imm = 0;
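    // The add/sub flag lives in bit 8 of the stored immediate: e.g. #4
    // becomes 0x104 (4 | 1 << 8) and #-4 becomes 0x004.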
1756    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1757    Inst.addOperand(MCOperand::CreateImm(Imm));
1758  }
1759
1760  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1761    assert(N == 1 && "Invalid number of operands!");
1762    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1763    assert(CE && "non-constant post-idx-imm8s4 operand!");
1764    int Imm = CE->getValue();
1765    bool isAdd = Imm >= 0;
1766    if (Imm == INT32_MIN) Imm = 0;
1767    // Immediate is scaled by 4.
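    // E.g. #-508 is stored as 127 (508 / 4) with the add flag (bit 8) clear.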
1768    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1769    Inst.addOperand(MCOperand::CreateImm(Imm));
1770  }
1771
1772  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1773    assert(N == 2 && "Invalid number of operands!");
1774    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1775    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1776  }
1777
1778  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1779    assert(N == 2 && "Invalid number of operands!");
1780    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1781    // The sign, shift type, and shift amount are encoded in a single operand
1782    // using the AM2 encoding helpers.
1783    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1784    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1785                                     PostIdxReg.ShiftTy);
1786    Inst.addOperand(MCOperand::CreateImm(Imm));
1787  }
1788
1789  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1790    assert(N == 1 && "Invalid number of operands!");
1791    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1792  }
1793
1794  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 1 && "Invalid number of operands!");
1796    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1797  }
1798
1799  void addVecListOperands(MCInst &Inst, unsigned N) const {
1800    assert(N == 1 && "Invalid number of operands!");
1801    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1802  }
1803
1804  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1805    assert(N == 2 && "Invalid number of operands!");
1806    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1807    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1808  }
1809
1810  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1811    assert(N == 1 && "Invalid number of operands!");
1812    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1813  }
1814
1815  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1816    assert(N == 1 && "Invalid number of operands!");
1817    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1818  }
1819
1820  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1821    assert(N == 1 && "Invalid number of operands!");
1822    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1823  }
1824
1825  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1826    assert(N == 1 && "Invalid number of operands!");
1827    // The immediate encodes the type of constant as well as the value.
1828    // Mask in that this is an i8 splat.
1829    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1830    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1831  }
1832
1833  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1834    assert(N == 1 && "Invalid number of operands!");
1835    // The immediate encodes the type of constant as well as the value.
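    // E.g. (from the cases below) 0x00ab encodes as 0x8ab and 0xab00
    // encodes as 0xaab.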
1836    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1837    unsigned Value = CE->getValue();
1838    if (Value >= 256)
1839      Value = (Value >> 8) | 0xa00;
1840    else
1841      Value |= 0x800;
1842    Inst.addOperand(MCOperand::CreateImm(Value));
1843  }
1844
1845  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1846    assert(N == 1 && "Invalid number of operands!");
1847    // The immediate encodes the type of constant as well as the value.
1848    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1849    unsigned Value = CE->getValue();
1850    if (Value >= 256 && Value <= 0xff00)
1851      Value = (Value >> 8) | 0x200;
1852    else if (Value > 0xffff && Value <= 0xff0000)
1853      Value = (Value >> 16) | 0x400;
1854    else if (Value > 0xffffff)
1855      Value = (Value >> 24) | 0x600;
1856    Inst.addOperand(MCOperand::CreateImm(Value));
1857  }
1858
1859  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1860    assert(N == 1 && "Invalid number of operands!");
1861    // The immediate encodes the type of constant as well as the value.
1862    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1863    unsigned Value = CE->getValue();
1864    if (Value >= 256 && Value <= 0xffff)
1865      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1866    else if (Value > 0xffff && Value <= 0xffffff)
1867      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1868    else if (Value > 0xffffff)
1869      Value = (Value >> 24) | 0x600;
1870    Inst.addOperand(MCOperand::CreateImm(Value));
1871  }
1872
1873  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1874    assert(N == 1 && "Invalid number of operands!");
1875    // The immediate encodes the type of constant as well as the value.
1876    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1877    unsigned Value = ~CE->getValue();
1878    if (Value >= 256 && Value <= 0xffff)
1879      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1880    else if (Value > 0xffff && Value <= 0xffffff)
1881      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1882    else if (Value > 0xffffff)
1883      Value = (Value >> 24) | 0x600;
1884    Inst.addOperand(MCOperand::CreateImm(Value));
1885  }
1886
1887  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1888    assert(N == 1 && "Invalid number of operands!");
1889    // The immediate encodes the type of constant as well as the value.
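    // Each byte of the 64-bit value is expected to be either 0x00 or 0xff
    // (the operand predicate should already have checked that); bit i of
    // the emitted immediate reflects byte i. E.g. 0x00000000000000ff yields
    // 0x01, emitted as 0x1e01 once the type bits are OR'd in.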
1890    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1891    uint64_t Value = CE->getValue();
1892    unsigned Imm = 0;
1893    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1894      Imm |= (Value & 1) << i;
1895    }
1896    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1897  }
1898
1899  virtual void print(raw_ostream &OS) const;
1900
1901  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1902    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1903    Op->ITMask.Mask = Mask;
1904    Op->StartLoc = S;
1905    Op->EndLoc = S;
1906    return Op;
1907  }
1908
1909  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1910    ARMOperand *Op = new ARMOperand(k_CondCode);
1911    Op->CC.Val = CC;
1912    Op->StartLoc = S;
1913    Op->EndLoc = S;
1914    return Op;
1915  }
1916
1917  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1918    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1919    Op->Cop.Val = CopVal;
1920    Op->StartLoc = S;
1921    Op->EndLoc = S;
1922    return Op;
1923  }
1924
1925  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1926    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1927    Op->Cop.Val = CopVal;
1928    Op->StartLoc = S;
1929    Op->EndLoc = S;
1930    return Op;
1931  }
1932
1933  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1934    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1935    Op->Cop.Val = Val;
1936    Op->StartLoc = S;
1937    Op->EndLoc = E;
1938    return Op;
1939  }
1940
1941  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1942    ARMOperand *Op = new ARMOperand(k_CCOut);
1943    Op->Reg.RegNum = RegNum;
1944    Op->StartLoc = S;
1945    Op->EndLoc = S;
1946    return Op;
1947  }
1948
1949  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1950    ARMOperand *Op = new ARMOperand(k_Token);
1951    Op->Tok.Data = Str.data();
1952    Op->Tok.Length = Str.size();
1953    Op->StartLoc = S;
1954    Op->EndLoc = S;
1955    return Op;
1956  }
1957
1958  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1959    ARMOperand *Op = new ARMOperand(k_Register);
1960    Op->Reg.RegNum = RegNum;
1961    Op->StartLoc = S;
1962    Op->EndLoc = E;
1963    return Op;
1964  }
1965
1966  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1967                                           unsigned SrcReg,
1968                                           unsigned ShiftReg,
1969                                           unsigned ShiftImm,
1970                                           SMLoc S, SMLoc E) {
1971    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1972    Op->RegShiftedReg.ShiftTy = ShTy;
1973    Op->RegShiftedReg.SrcReg = SrcReg;
1974    Op->RegShiftedReg.ShiftReg = ShiftReg;
1975    Op->RegShiftedReg.ShiftImm = ShiftImm;
1976    Op->StartLoc = S;
1977    Op->EndLoc = E;
1978    return Op;
1979  }
1980
1981  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1982                                            unsigned SrcReg,
1983                                            unsigned ShiftImm,
1984                                            SMLoc S, SMLoc E) {
1985    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1986    Op->RegShiftedImm.ShiftTy = ShTy;
1987    Op->RegShiftedImm.SrcReg = SrcReg;
1988    Op->RegShiftedImm.ShiftImm = ShiftImm;
1989    Op->StartLoc = S;
1990    Op->EndLoc = E;
1991    return Op;
1992  }
1993
1994  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1995                                   SMLoc S, SMLoc E) {
1996    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1997    Op->ShifterImm.isASR = isASR;
1998    Op->ShifterImm.Imm = Imm;
1999    Op->StartLoc = S;
2000    Op->EndLoc = E;
2001    return Op;
2002  }
2003
2004  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2005    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2006    Op->RotImm.Imm = Imm;
2007    Op->StartLoc = S;
2008    Op->EndLoc = E;
2009    return Op;
2010  }
2011
2012  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2013                                    SMLoc S, SMLoc E) {
2014    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2015    Op->Bitfield.LSB = LSB;
2016    Op->Bitfield.Width = Width;
2017    Op->StartLoc = S;
2018    Op->EndLoc = E;
2019    return Op;
2020  }
2021
2022  static ARMOperand *
2023  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2024                SMLoc StartLoc, SMLoc EndLoc) {
2025    KindTy Kind = k_RegisterList;
2026
2027    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2028      Kind = k_DPRRegisterList;
2029    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2030             contains(Regs.front().first))
2031      Kind = k_SPRRegisterList;
2032
2033    ARMOperand *Op = new ARMOperand(Kind);
2034    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2035           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2036      Op->Registers.push_back(I->first);
2037    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2038    Op->StartLoc = StartLoc;
2039    Op->EndLoc = EndLoc;
2040    return Op;
2041  }
2042
2043  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2044                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2045    ARMOperand *Op = new ARMOperand(k_VectorList);
2046    Op->VectorList.RegNum = RegNum;
2047    Op->VectorList.Count = Count;
2048    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2049    Op->StartLoc = S;
2050    Op->EndLoc = E;
2051    return Op;
2052  }
2053
2054  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2055                                              bool isDoubleSpaced,
2056                                              SMLoc S, SMLoc E) {
2057    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2058    Op->VectorList.RegNum = RegNum;
2059    Op->VectorList.Count = Count;
2060    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2061    Op->StartLoc = S;
2062    Op->EndLoc = E;
2063    return Op;
2064  }
2065
2066  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2067                                             unsigned Index,
2068                                             bool isDoubleSpaced,
2069                                             SMLoc S, SMLoc E) {
2070    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2071    Op->VectorList.RegNum = RegNum;
2072    Op->VectorList.Count = Count;
2073    Op->VectorList.LaneIndex = Index;
2074    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2075    Op->StartLoc = S;
2076    Op->EndLoc = E;
2077    return Op;
2078  }
2079
2080  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2081                                       MCContext &Ctx) {
2082    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2083    Op->VectorIndex.Val = Idx;
2084    Op->StartLoc = S;
2085    Op->EndLoc = E;
2086    return Op;
2087  }
2088
2089  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2090    ARMOperand *Op = new ARMOperand(k_Immediate);
2091    Op->Imm.Val = Val;
2092    Op->StartLoc = S;
2093    Op->EndLoc = E;
2094    return Op;
2095  }
2096
2097  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2098    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2099    Op->FPImm.Val = Val;
2100    Op->StartLoc = S;
2101    Op->EndLoc = S;
2102    return Op;
2103  }
2104
2105  static ARMOperand *CreateMem(unsigned BaseRegNum,
2106                               const MCConstantExpr *OffsetImm,
2107                               unsigned OffsetRegNum,
2108                               ARM_AM::ShiftOpc ShiftType,
2109                               unsigned ShiftImm,
2110                               unsigned Alignment,
2111                               bool isNegative,
2112                               SMLoc S, SMLoc E) {
2113    ARMOperand *Op = new ARMOperand(k_Memory);
2114    Op->Memory.BaseRegNum = BaseRegNum;
2115    Op->Memory.OffsetImm = OffsetImm;
2116    Op->Memory.OffsetRegNum = OffsetRegNum;
2117    Op->Memory.ShiftType = ShiftType;
2118    Op->Memory.ShiftImm = ShiftImm;
2119    Op->Memory.Alignment = Alignment;
2120    Op->Memory.isNegative = isNegative;
2121    Op->StartLoc = S;
2122    Op->EndLoc = E;
2123    return Op;
2124  }
2125
2126  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2127                                      ARM_AM::ShiftOpc ShiftTy,
2128                                      unsigned ShiftImm,
2129                                      SMLoc S, SMLoc E) {
2130    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2131    Op->PostIdxReg.RegNum = RegNum;
2132    Op->PostIdxReg.isAdd = isAdd;
2133    Op->PostIdxReg.ShiftTy = ShiftTy;
2134    Op->PostIdxReg.ShiftImm = ShiftImm;
2135    Op->StartLoc = S;
2136    Op->EndLoc = E;
2137    return Op;
2138  }
2139
2140  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2141    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2142    Op->MBOpt.Val = Opt;
2143    Op->StartLoc = S;
2144    Op->EndLoc = S;
2145    return Op;
2146  }
2147
2148  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2149    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2150    Op->IFlags.Val = IFlags;
2151    Op->StartLoc = S;
2152    Op->EndLoc = S;
2153    return Op;
2154  }
2155
2156  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2157    ARMOperand *Op = new ARMOperand(k_MSRMask);
2158    Op->MMask.Val = MMask;
2159    Op->StartLoc = S;
2160    Op->EndLoc = S;
2161    return Op;
2162  }
2163};
2164
2165} // end anonymous namespace.
2166
2167void ARMOperand::print(raw_ostream &OS) const {
2168  switch (Kind) {
2169  case k_FPImmediate:
2170    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2171       << ") >";
2172    break;
2173  case k_CondCode:
2174    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2175    break;
2176  case k_CCOut:
2177    OS << "<ccout " << getReg() << ">";
2178    break;
2179  case k_ITCondMask: {
2180    static const char *MaskStr[] = {
2181      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2182      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2183    };
2184    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2185    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2186    break;
2187  }
2188  case k_CoprocNum:
2189    OS << "<coprocessor number: " << getCoproc() << ">";
2190    break;
2191  case k_CoprocReg:
2192    OS << "<coprocessor register: " << getCoproc() << ">";
2193    break;
2194  case k_CoprocOption:
2195    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2196    break;
2197  case k_MSRMask:
2198    OS << "<mask: " << getMSRMask() << ">";
2199    break;
2200  case k_Immediate:
2201    getImm()->print(OS);
2202    break;
2203  case k_MemBarrierOpt:
2204    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2205    break;
2206  case k_Memory:
2207    OS << "<memory "
2208       << "base:" << Memory.BaseRegNum;
2209    OS << ">";
2210    break;
2211  case k_PostIndexRegister:
2212    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2213       << PostIdxReg.RegNum;
2214    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2215      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2216         << PostIdxReg.ShiftImm;
2217    OS << ">";
2218    break;
2219  case k_ProcIFlags: {
2220    OS << "<ARM_PROC::";
2221    unsigned IFlags = getProcIFlags();
2222    for (int i=2; i >= 0; --i)
2223      if (IFlags & (1 << i))
2224        OS << ARM_PROC::IFlagsToString(1 << i);
2225    OS << ">";
2226    break;
2227  }
2228  case k_Register:
2229    OS << "<register " << getReg() << ">";
2230    break;
2231  case k_ShifterImmediate:
2232    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2233       << " #" << ShifterImm.Imm << ">";
2234    break;
2235  case k_ShiftedRegister:
2236    OS << "<so_reg_reg "
2237       << RegShiftedReg.SrcReg << " "
2238       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2239       << " " << RegShiftedReg.ShiftReg << ">";
2240    break;
2241  case k_ShiftedImmediate:
2242    OS << "<so_reg_imm "
2243       << RegShiftedImm.SrcReg << " "
2244       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2245       << " #" << RegShiftedImm.ShiftImm << ">";
2246    break;
2247  case k_RotateImmediate:
2248    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2249    break;
2250  case k_BitfieldDescriptor:
2251    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2252       << ", width: " << Bitfield.Width << ">";
2253    break;
2254  case k_RegisterList:
2255  case k_DPRRegisterList:
2256  case k_SPRRegisterList: {
2257    OS << "<register_list ";
2258
2259    const SmallVectorImpl<unsigned> &RegList = getRegList();
2260    for (SmallVectorImpl<unsigned>::const_iterator
2261           I = RegList.begin(), E = RegList.end(); I != E; ) {
2262      OS << *I;
2263      if (++I < E) OS << ", ";
2264    }
2265
2266    OS << ">";
2267    break;
2268  }
2269  case k_VectorList:
2270    OS << "<vector_list " << VectorList.Count << " * "
2271       << VectorList.RegNum << ">";
2272    break;
2273  case k_VectorListAllLanes:
2274    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2275       << VectorList.RegNum << ">";
2276    break;
2277  case k_VectorListIndexed:
2278    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2279       << VectorList.Count << " * " << VectorList.RegNum << ">";
2280    break;
2281  case k_Token:
2282    OS << "'" << getToken() << "'";
2283    break;
2284  case k_VectorIndex:
2285    OS << "<vectorindex " << getVectorIndex() << ">";
2286    break;
2287  }
2288}
2289
2290/// @name Auto-generated Match Functions
2291/// {
2292
2293static unsigned MatchRegisterName(StringRef Name);
2294
2295/// }
2296
2297bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2298                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2299  StartLoc = Parser.getTok().getLoc();
2300  RegNo = tryParseRegister();
2301  EndLoc = Parser.getTok().getLoc();
2302
2303  return (RegNo == (unsigned)-1);
2304}
2305
2306/// Try to parse a register name.  The token must be an Identifier when called,
2307/// and if it is a register name the token is eaten and the register number is
2308/// returned.  Otherwise return -1.
2309///
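/// Both the architectural names and the usual gas-style aliases (e.g. "ip"
/// for r12, "fp" for r11, "a1"-"a4" and "v1"-"v8") are accepted, as is any
/// alias registered via the .req directive.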
2310int ARMAsmParser::tryParseRegister() {
2311  const AsmToken &Tok = Parser.getTok();
2312  if (Tok.isNot(AsmToken::Identifier)) return -1;
2313
2314  std::string lowerCase = Tok.getString().lower();
2315  unsigned RegNum = MatchRegisterName(lowerCase);
2316  if (!RegNum) {
2317    RegNum = StringSwitch<unsigned>(lowerCase)
2318      .Case("r13", ARM::SP)
2319      .Case("r14", ARM::LR)
2320      .Case("r15", ARM::PC)
2321      .Case("ip", ARM::R12)
2322      // Additional register name aliases for 'gas' compatibility.
2323      .Case("a1", ARM::R0)
2324      .Case("a2", ARM::R1)
2325      .Case("a3", ARM::R2)
2326      .Case("a4", ARM::R3)
2327      .Case("v1", ARM::R4)
2328      .Case("v2", ARM::R5)
2329      .Case("v3", ARM::R6)
2330      .Case("v4", ARM::R7)
2331      .Case("v5", ARM::R8)
2332      .Case("v6", ARM::R9)
2333      .Case("v7", ARM::R10)
2334      .Case("v8", ARM::R11)
2335      .Case("sb", ARM::R9)
2336      .Case("sl", ARM::R10)
2337      .Case("fp", ARM::R11)
2338      .Default(0);
2339  }
2340  if (!RegNum) {
2341    // Check for aliases registered via .req. Canonicalize to lower case.
2342    // That's more consistent since register names are case insensitive, and
2343    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2344    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2345    // If no match, return failure.
2346    if (Entry == RegisterReqs.end())
2347      return -1;
2348    Parser.Lex(); // Eat identifier token.
2349    return Entry->getValue();
2350  }
2351
2352  Parser.Lex(); // Eat identifier token.
2353
2354  return RegNum;
2355}
2356
2357// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2358// If a recoverable error occurs, return 1. If an irrecoverable error
2359// occurs, return -1. An irrecoverable error is one where tokens have been
2360// consumed in the process of trying to parse the shifter (i.e., when it is
2361// indeed a shifter operand, but malformed).
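// For example, for "mov r0, r1, lsl #2" this is called after "r1" has been
// parsed; that register operand is popped off the operand list and replaced
// with a single shifted-register operand holding (lsl, r1, #2).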
2362int ARMAsmParser::tryParseShiftRegister(
2363                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2364  SMLoc S = Parser.getTok().getLoc();
2365  const AsmToken &Tok = Parser.getTok();
2366  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2367
2368  std::string lowerCase = Tok.getString().lower();
2369  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2370      .Case("asl", ARM_AM::lsl)
2371      .Case("lsl", ARM_AM::lsl)
2372      .Case("lsr", ARM_AM::lsr)
2373      .Case("asr", ARM_AM::asr)
2374      .Case("ror", ARM_AM::ror)
2375      .Case("rrx", ARM_AM::rrx)
2376      .Default(ARM_AM::no_shift);
2377
2378  if (ShiftTy == ARM_AM::no_shift)
2379    return 1;
2380
2381  Parser.Lex(); // Eat the operator.
2382
2383  // The source register for the shift has already been added to the
2384  // operand list, so we need to pop it off and combine it into the shifted
2385  // register operand instead.
2386  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2387  if (!PrevOp->isReg())
2388    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2389  int SrcReg = PrevOp->getReg();
2390  int64_t Imm = 0;
2391  int ShiftReg = 0;
2392  if (ShiftTy == ARM_AM::rrx) {
2393    // RRX doesn't have an explicit shift amount. The encoder expects
2394    // the shift register to be the same as the source register. Seems odd,
2395    // but OK.
2396    ShiftReg = SrcReg;
2397  } else {
2398    // Figure out if this is shifted by a constant or a register (for non-RRX).
2399    if (Parser.getTok().is(AsmToken::Hash) ||
2400        Parser.getTok().is(AsmToken::Dollar)) {
2401      Parser.Lex(); // Eat hash.
2402      SMLoc ImmLoc = Parser.getTok().getLoc();
2403      const MCExpr *ShiftExpr = 0;
2404      if (getParser().ParseExpression(ShiftExpr)) {
2405        Error(ImmLoc, "invalid immediate shift value");
2406        return -1;
2407      }
2408      // The expression must be evaluatable as an immediate.
2409      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2410      if (!CE) {
2411        Error(ImmLoc, "invalid immediate shift value");
2412        return -1;
2413      }
2414      // Range check the immediate.
2415      // lsl, ror: 0 <= imm <= 31
2416      // lsr, asr: 0 <= imm <= 32
2417      Imm = CE->getValue();
2418      if (Imm < 0 ||
2419          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2420          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2421        Error(ImmLoc, "immediate shift value out of range");
2422        return -1;
2423      }
2424    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2425      ShiftReg = tryParseRegister();
2426      SMLoc L = Parser.getTok().getLoc();
2427      if (ShiftReg == -1) {
2428        Error(L, "expected immediate or register in shift operand");
2429        return -1;
2430      }
2431    } else {
2432      Error(Parser.getTok().getLoc(),
2433            "expected immediate or register in shift operand");
2434      return -1;
2435    }
2436  }
2437
2438  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2439    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2440                                                         ShiftReg, Imm,
2441                                               S, Parser.getTok().getLoc()));
2442  else
2443    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2444                                               S, Parser.getTok().getLoc()));
2445
2446  return 0;
2447}
2448
2449
2450/// Try to parse a register name.  The token must be an Identifier when called.
2451/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2452/// if there is a "writeback". Returns 'true' if it's not a register.
2453///
2454/// TODO this is likely to change to allow different register types and or to
2455/// parse for a specific register type.
2456bool ARMAsmParser::
2457tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2458  SMLoc S = Parser.getTok().getLoc();
2459  int RegNo = tryParseRegister();
2460  if (RegNo == -1)
2461    return true;
2462
2463  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2464
2465  const AsmToken &ExclaimTok = Parser.getTok();
2466  if (ExclaimTok.is(AsmToken::Exclaim)) {
2467    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2468                                               ExclaimTok.getLoc()));
2469    Parser.Lex(); // Eat exclaim token
2470    return false;
2471  }
2472
2473  // Also check for an index operand. This is only legal for vector registers,
2474  // but that'll get caught OK in operand matching, so we don't need to
2475  // explicitly filter everything else out here.
2476  if (Parser.getTok().is(AsmToken::LBrac)) {
2477    SMLoc SIdx = Parser.getTok().getLoc();
2478    Parser.Lex(); // Eat left bracket token.
2479
2480    const MCExpr *ImmVal;
2481    if (getParser().ParseExpression(ImmVal))
2482      return true;
2483    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2484    if (!MCE) {
2485      TokError("immediate value expected for vector index");
2486      return true;
2487    }
2488
2489    SMLoc E = Parser.getTok().getLoc();
2490    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2491      Error(E, "']' expected");
2492      return true;
2493    }
2494
2495    Parser.Lex(); // Eat right bracket token.
2496
2497    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2498                                                     SIdx, E,
2499                                                     getContext()));
2500  }
2501
2502  return false;
2503}
2504
2505/// MatchCoprocessorOperandName - Try to match a symbolic coprocessor
2506/// operand name from an instruction. Example: "p1", "p7", "c3",
2507/// "c5", ...
2508static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2509  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2510  // but efficient.
2511  switch (Name.size()) {
2512  default: break;
2513  case 2:
2514    if (Name[0] != CoprocOp)
2515      return -1;
2516    switch (Name[1]) {
2517    default:  return -1;
2518    case '0': return 0;
2519    case '1': return 1;
2520    case '2': return 2;
2521    case '3': return 3;
2522    case '4': return 4;
2523    case '5': return 5;
2524    case '6': return 6;
2525    case '7': return 7;
2526    case '8': return 8;
2527    case '9': return 9;
2528    }
2529    break;
2530  case 3:
2531    if (Name[0] != CoprocOp || Name[1] != '1')
2532      return -1;
2533    switch (Name[2]) {
2534    default:  return -1;
2535    case '0': return 10;
2536    case '1': return 11;
2537    case '2': return 12;
2538    case '3': return 13;
2539    case '4': return 14;
2540    case '5': return 15;
2541    }
2542    break;
2543  }
2544
2545  return -1;
2546}
2547
2548/// parseITCondCode - Try to parse a condition code for an IT instruction.
2549ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2550parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2551  SMLoc S = Parser.getTok().getLoc();
2552  const AsmToken &Tok = Parser.getTok();
2553  if (!Tok.is(AsmToken::Identifier))
2554    return MatchOperand_NoMatch;
2555  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2556    .Case("eq", ARMCC::EQ)
2557    .Case("ne", ARMCC::NE)
2558    .Case("hs", ARMCC::HS)
2559    .Case("cs", ARMCC::HS)
2560    .Case("lo", ARMCC::LO)
2561    .Case("cc", ARMCC::LO)
2562    .Case("mi", ARMCC::MI)
2563    .Case("pl", ARMCC::PL)
2564    .Case("vs", ARMCC::VS)
2565    .Case("vc", ARMCC::VC)
2566    .Case("hi", ARMCC::HI)
2567    .Case("ls", ARMCC::LS)
2568    .Case("ge", ARMCC::GE)
2569    .Case("lt", ARMCC::LT)
2570    .Case("gt", ARMCC::GT)
2571    .Case("le", ARMCC::LE)
2572    .Case("al", ARMCC::AL)
2573    .Default(~0U);
2574  if (CC == ~0U)
2575    return MatchOperand_NoMatch;
2576  Parser.Lex(); // Eat the token.
2577
2578  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2579
2580  return MatchOperand_Success;
2581}
2582
2583/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2584/// token must be an Identifier when called, and if it is a coprocessor
2585/// number, the token is eaten and the operand is added to the operand list.
2586ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2587parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2588  SMLoc S = Parser.getTok().getLoc();
2589  const AsmToken &Tok = Parser.getTok();
2590  if (Tok.isNot(AsmToken::Identifier))
2591    return MatchOperand_NoMatch;
2592
2593  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2594  if (Num == -1)
2595    return MatchOperand_NoMatch;
2596
2597  Parser.Lex(); // Eat identifier token.
2598  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2599  return MatchOperand_Success;
2600}
2601
2602/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2603/// token must be an Identifier when called, and if it is a coprocessor
2604/// register, the token is eaten and the operand is added to the operand list.
2605ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2606parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2607  SMLoc S = Parser.getTok().getLoc();
2608  const AsmToken &Tok = Parser.getTok();
2609  if (Tok.isNot(AsmToken::Identifier))
2610    return MatchOperand_NoMatch;
2611
2612  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2613  if (Reg == -1)
2614    return MatchOperand_NoMatch;
2615
2616  Parser.Lex(); // Eat identifier token.
2617  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2618  return MatchOperand_Success;
2619}
2620
2621/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2622/// coproc_option : '{' imm0_255 '}'
2623ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2624parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2625  SMLoc S = Parser.getTok().getLoc();
2626
2627  // If this isn't a '{', this isn't a coprocessor immediate operand.
2628  if (Parser.getTok().isNot(AsmToken::LCurly))
2629    return MatchOperand_NoMatch;
2630  Parser.Lex(); // Eat the '{'
2631
2632  const MCExpr *Expr;
2633  SMLoc Loc = Parser.getTok().getLoc();
2634  if (getParser().ParseExpression(Expr)) {
2635    Error(Loc, "illegal expression");
2636    return MatchOperand_ParseFail;
2637  }
2638  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2639  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2640    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2641    return MatchOperand_ParseFail;
2642  }
2643  int Val = CE->getValue();
2644
2645  // Check for and consume the closing '}'
2646  if (Parser.getTok().isNot(AsmToken::RCurly))
2647    return MatchOperand_ParseFail;
2648  SMLoc E = Parser.getTok().getLoc();
2649  Parser.Lex(); // Eat the '}'
2650
2651  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2652  return MatchOperand_Success;
2653}
2654
2655// For register list parsing, we need to map from raw GPR register numbering
2656// to the enumeration values. The enumeration values aren't sorted by
2657// register number due to our using "sp", "lr" and "pc" as canonical names.
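// E.g. the register after r12 is sp, then lr, then pc.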
2658static unsigned getNextRegister(unsigned Reg) {
2659  // If this is a GPR, we need to do it manually, otherwise we can rely
2660  // on the sort ordering of the enumeration since the other reg-classes
2661  // are sane.
2662  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2663    return Reg + 1;
2664  switch(Reg) {
2665  default: llvm_unreachable("Invalid GPR number!");
2666  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2667  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2668  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2669  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2670  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2671  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2672  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2673  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2674  }
2675}
2676
2677// Return the low-subreg of a given Q register.
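// E.g. Q3 maps to D6 (Q3 is the D6:D7 pair).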
2678static unsigned getDRegFromQReg(unsigned QReg) {
2679  switch (QReg) {
2680  default: llvm_unreachable("expected a Q register!");
2681  case ARM::Q0:  return ARM::D0;
2682  case ARM::Q1:  return ARM::D2;
2683  case ARM::Q2:  return ARM::D4;
2684  case ARM::Q3:  return ARM::D6;
2685  case ARM::Q4:  return ARM::D8;
2686  case ARM::Q5:  return ARM::D10;
2687  case ARM::Q6:  return ARM::D12;
2688  case ARM::Q7:  return ARM::D14;
2689  case ARM::Q8:  return ARM::D16;
2690  case ARM::Q9:  return ARM::D18;
2691  case ARM::Q10: return ARM::D20;
2692  case ARM::Q11: return ARM::D22;
2693  case ARM::Q12: return ARM::D24;
2694  case ARM::Q13: return ARM::D26;
2695  case ARM::Q14: return ARM::D28;
2696  case ARM::Q15: return ARM::D30;
2697  }
2698}
2699
2700/// Parse a register list.
2701bool ARMAsmParser::
2702parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2703  assert(Parser.getTok().is(AsmToken::LCurly) &&
2704         "Token is not a Left Curly Brace");
2705  SMLoc S = Parser.getTok().getLoc();
2706  Parser.Lex(); // Eat '{' token.
2707  SMLoc RegLoc = Parser.getTok().getLoc();
2708
2709  // Check the first register in the list to see what register class
2710  // this is a list of.
2711  int Reg = tryParseRegister();
2712  if (Reg == -1)
2713    return Error(RegLoc, "register expected");
2714
2715  // The reglist instructions have at most 16 registers, so reserve
2716  // space for that many.
2717  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2718
2719  // Allow Q regs and just interpret them as the two D sub-registers.
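  // E.g. "{q0, q1}" is handled as if it had been written "{d0, d1, d2, d3}".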
2720  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2721    Reg = getDRegFromQReg(Reg);
2722    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2723    ++Reg;
2724  }
2725  const MCRegisterClass *RC;
2726  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2727    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2728  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2729    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2730  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2731    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2732  else
2733    return Error(RegLoc, "invalid register in register list");
2734
2735  // Store the register.
2736  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2737
2738  // This starts immediately after the first register token in the list,
2739  // so we can see either a comma or a minus (range separator) as a legal
2740  // next token.
2741  while (Parser.getTok().is(AsmToken::Comma) ||
2742         Parser.getTok().is(AsmToken::Minus)) {
2743    if (Parser.getTok().is(AsmToken::Minus)) {
2744      Parser.Lex(); // Eat the minus.
2745      SMLoc EndLoc = Parser.getTok().getLoc();
2746      int EndReg = tryParseRegister();
2747      if (EndReg == -1)
2748        return Error(EndLoc, "register expected");
2749      // Allow Q regs and just interpret them as the two D sub-registers.
2750      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2751        EndReg = getDRegFromQReg(EndReg) + 1;
2752      // If the register is the same as the start reg, there's nothing
2753      // more to do.
2754      if (Reg == EndReg)
2755        continue;
2756      // The register must be in the same register class as the first.
2757      if (!RC->contains(EndReg))
2758        return Error(EndLoc, "invalid register in register list");
2759      // Ranges must go from low to high.
2760      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2761        return Error(EndLoc, "bad range in register list");
2762
2763      // Add all the registers in the range to the register list.
2764      while (Reg != EndReg) {
2765        Reg = getNextRegister(Reg);
2766        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2767      }
2768      continue;
2769    }
2770    Parser.Lex(); // Eat the comma.
2771    RegLoc = Parser.getTok().getLoc();
2772    int OldReg = Reg;
2773    const AsmToken RegTok = Parser.getTok();
2774    Reg = tryParseRegister();
2775    if (Reg == -1)
2776      return Error(RegLoc, "register expected");
2777    // Allow Q regs and just interpret them as the two D sub-registers.
2778    bool isQReg = false;
2779    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2780      Reg = getDRegFromQReg(Reg);
2781      isQReg = true;
2782    }
2783    // The register must be in the same register class as the first.
2784    if (!RC->contains(Reg))
2785      return Error(RegLoc, "invalid register in register list");
2786    // List must be monotonically increasing.
2787    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2788      return Error(RegLoc, "register list not in ascending order");
2789    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2790      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2791              ") in register list");
2792      continue;
2793    }
2794    // VFP register lists must also be contiguous.
2795    // It's OK to use the enumeration values directly here, as the
2796    // VFP register classes have the enum sorted properly.
2797    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2798        Reg != OldReg + 1)
2799      return Error(RegLoc, "non-contiguous register range");
2800    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2801    if (isQReg)
2802      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2803  }
2804
2805  SMLoc E = Parser.getTok().getLoc();
2806  if (Parser.getTok().isNot(AsmToken::RCurly))
2807    return Error(E, "'}' expected");
2808  Parser.Lex(); // Eat '}' token.
2809
2810  // Push the register list operand.
2811  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2812
2813  // The ARM system instruction variants for LDM/STM have a '^' token here.
2814  if (Parser.getTok().is(AsmToken::Caret)) {
2815    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2816    Parser.Lex(); // Eat '^' token.
2817  }
2818
2819  return false;
2820}
2821
2822// Helper function to parse the lane index for vector lists.
2823ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2824parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2825  Index = 0; // Always return a defined index value.
2826  if (Parser.getTok().is(AsmToken::LBrac)) {
2827    Parser.Lex(); // Eat the '['.
2828    if (Parser.getTok().is(AsmToken::RBrac)) {
2829      // "Dn[]" is the 'all lanes' syntax.
2830      LaneKind = AllLanes;
2831      Parser.Lex(); // Eat the ']'.
2832      return MatchOperand_Success;
2833    }
2834    const MCExpr *LaneIndex;
2835    SMLoc Loc = Parser.getTok().getLoc();
2836    if (getParser().ParseExpression(LaneIndex)) {
2837      Error(Loc, "illegal expression");
2838      return MatchOperand_ParseFail;
2839    }
2840    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2841    if (!CE) {
2842      Error(Loc, "lane index must be empty or an integer");
2843      return MatchOperand_ParseFail;
2844    }
2845    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2846      Error(Parser.getTok().getLoc(), "']' expected");
2847      return MatchOperand_ParseFail;
2848    }
2849    Parser.Lex(); // Eat the ']'.
2850    int64_t Val = CE->getValue();
2851
2852    // FIXME: Make this range check context sensitive for .8, .16, .32.
2853    if (Val < 0 || Val > 7) {
2854      Error(Parser.getTok().getLoc(), "lane index out of range");
2855      return MatchOperand_ParseFail;
2856    }
2857    Index = Val;
2858    LaneKind = IndexedLane;
2859    return MatchOperand_Success;
2860  }
2861  LaneKind = NoLanes;
2862  return MatchOperand_Success;
2863}
2864
2865// Parse a vector register list.
2866ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2867parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2868  VectorLaneTy LaneKind;
2869  unsigned LaneIndex;
2870  SMLoc S = Parser.getTok().getLoc();
2871  // As an extension (to match gas), support a plain D register or Q register
2872// (without enclosing curly braces) as a single- or double-entry list,
2873  // respectively.
2874  if (Parser.getTok().is(AsmToken::Identifier)) {
2875    int Reg = tryParseRegister();
2876    if (Reg == -1)
2877      return MatchOperand_NoMatch;
2878    SMLoc E = Parser.getTok().getLoc();
2879    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2880      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2881      if (Res != MatchOperand_Success)
2882        return Res;
2883      switch (LaneKind) {
2884      default:
2885        assert(0 && "unexpected lane kind!");
2886      case NoLanes:
2887        E = Parser.getTok().getLoc();
2888        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2889        break;
2890      case AllLanes:
2891        E = Parser.getTok().getLoc();
2892        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2893                                                                S, E));
2894        break;
2895      case IndexedLane:
2896        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2897                                                               LaneIndex,
2898                                                               false, S, E));
2899        break;
2900      }
2901      return MatchOperand_Success;
2902    }
2903    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2904      Reg = getDRegFromQReg(Reg);
2905      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2906      if (Res != MatchOperand_Success)
2907        return Res;
2908      switch (LaneKind) {
2909      default:
2910        assert(0 && "unexpected lane kind!");
2911      case NoLanes:
2912        E = Parser.getTok().getLoc();
2913        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2914        break;
2915      case AllLanes:
2916        E = Parser.getTok().getLoc();
2917        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2918                                                                S, E));
2919        break;
2920      case IndexedLane:
2921        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2922                                                               LaneIndex,
2923                                                               false, S, E));
2924        break;
2925      }
2926      return MatchOperand_Success;
2927    }
2928    Error(S, "vector register expected");
2929    return MatchOperand_ParseFail;
2930  }
2931
2932  if (Parser.getTok().isNot(AsmToken::LCurly))
2933    return MatchOperand_NoMatch;
2934
2935  Parser.Lex(); // Eat '{' token.
2936  SMLoc RegLoc = Parser.getTok().getLoc();
2937
2938  int Reg = tryParseRegister();
2939  if (Reg == -1) {
2940    Error(RegLoc, "register expected");
2941    return MatchOperand_ParseFail;
2942  }
2943  unsigned Count = 1;
2944  int Spacing = 0;
2945  unsigned FirstReg = Reg;
2946  // The list is of D registers, but we also allow Q regs and just interpret
2947  // them as the two D sub-registers.
2948  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2949    FirstReg = Reg = getDRegFromQReg(Reg);
2950    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2951                 // it's ambiguous with four-register single spaced.
2952    ++Reg;
2953    ++Count;
2954  }
2955  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2956    return MatchOperand_ParseFail;
2957
2958  while (Parser.getTok().is(AsmToken::Comma) ||
2959         Parser.getTok().is(AsmToken::Minus)) {
2960    if (Parser.getTok().is(AsmToken::Minus)) {
2961      if (!Spacing)
2962        Spacing = 1; // Register range implies a single spaced list.
2963      else if (Spacing == 2) {
2964        Error(Parser.getTok().getLoc(),
2965              "sequential registers in double spaced list");
2966        return MatchOperand_ParseFail;
2967      }
2968      Parser.Lex(); // Eat the minus.
2969      SMLoc EndLoc = Parser.getTok().getLoc();
2970      int EndReg = tryParseRegister();
2971      if (EndReg == -1) {
2972        Error(EndLoc, "register expected");
2973        return MatchOperand_ParseFail;
2974      }
2975      // Allow Q regs and just interpret them as the two D sub-registers.
2976      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2977        EndReg = getDRegFromQReg(EndReg) + 1;
2978      // If the register is the same as the start reg, there's nothing
2979      // more to do.
2980      if (Reg == EndReg)
2981        continue;
2982      // The register must be in the same register class as the first.
2983      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2984        Error(EndLoc, "invalid register in register list");
2985        return MatchOperand_ParseFail;
2986      }
2987      // Ranges must go from low to high.
2988      if (Reg > EndReg) {
2989        Error(EndLoc, "bad range in register list");
2990        return MatchOperand_ParseFail;
2991      }
2992      // Parse the lane specifier if present.
2993      VectorLaneTy NextLaneKind;
2994      unsigned NextLaneIndex;
2995      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2996        return MatchOperand_ParseFail;
2997      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2998        Error(EndLoc, "mismatched lane index in register list");
2999        return MatchOperand_ParseFail;
3000      }
3001      EndLoc = Parser.getTok().getLoc();
3002
3003      // Add all the registers in the range to the register list.
3004      Count += EndReg - Reg;
3005      Reg = EndReg;
3006      continue;
3007    }
3008    Parser.Lex(); // Eat the comma.
3009    RegLoc = Parser.getTok().getLoc();
3010    int OldReg = Reg;
3011    Reg = tryParseRegister();
3012    if (Reg == -1) {
3013      Error(RegLoc, "register expected");
3014      return MatchOperand_ParseFail;
3015    }
3016    // Vector register lists must be contiguous.
3017    // It's OK to use the enumeration values directly here, as the VFP
3018    // register classes have the enum sorted properly.
3019    //
3020    // The list is of D registers, but we also allow Q regs and just interpret
3021    // them as the two D sub-registers.
3022    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3023      if (!Spacing)
3024        Spacing = 1; // Register range implies a single spaced list.
3025      else if (Spacing == 2) {
3026        Error(RegLoc,
3027              "invalid register in double-spaced list (must be 'D' register)");
3028        return MatchOperand_ParseFail;
3029      }
3030      Reg = getDRegFromQReg(Reg);
3031      if (Reg != OldReg + 1) {
3032        Error(RegLoc, "non-contiguous register range");
3033        return MatchOperand_ParseFail;
3034      }
3035      ++Reg;
3036      Count += 2;
3037      // Parse the lane specifier if present.
3038      VectorLaneTy NextLaneKind;
3039      unsigned NextLaneIndex;
3040      SMLoc EndLoc = Parser.getTok().getLoc();
3041      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3042        return MatchOperand_ParseFail;
3043      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3044        Error(EndLoc, "mismatched lane index in register list");
3045        return MatchOperand_ParseFail;
3046      }
3047      continue;
3048    }
3049    // Normal D register.
3050    // Figure out the register spacing (single or double) of the list if
3051    // we don't know it already.
3052    if (!Spacing)
3053      Spacing = 1 + (Reg == OldReg + 2);
3054
3055    // Just check that it's contiguous and keep going.
3056    if (Reg != OldReg + Spacing) {
3057      Error(RegLoc, "non-contiguous register range");
3058      return MatchOperand_ParseFail;
3059    }
3060    ++Count;
3061    // Parse the lane specifier if present.
3062    VectorLaneTy NextLaneKind;
3063    unsigned NextLaneIndex;
3064    SMLoc EndLoc = Parser.getTok().getLoc();
3065    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3066      return MatchOperand_ParseFail;
3067    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3068      Error(EndLoc, "mismatched lane index in register list");
3069      return MatchOperand_ParseFail;
3070    }
3071  }
3072
3073  SMLoc E = Parser.getTok().getLoc();
3074  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3075    Error(E, "'}' expected");
3076    return MatchOperand_ParseFail;
3077  }
3078  Parser.Lex(); // Eat '}' token.
3079
3080  switch (LaneKind) {
3081  default:
3082    assert(0 && "unexpected lane kind in register list.");
3083  case NoLanes:
3084    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3085                                                    (Spacing == 2), S, E));
3086    break;
3087  case AllLanes:
3088    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3089                                                            (Spacing == 2),
3090                                                            S, E));
3091    break;
3092  case IndexedLane:
3093    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3094                                                           LaneIndex,
3095                                                           (Spacing == 2),
3096                                                           S, E));
3097    break;
3098  }
3099  return MatchOperand_Success;
3100}
3101
3102/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
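/// For example (assumed), the "sy" in "dmb sy" or the "ishst" in "dsb ishst".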
3103ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3104parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3105  SMLoc S = Parser.getTok().getLoc();
3106  const AsmToken &Tok = Parser.getTok();
3107  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3108  StringRef OptStr = Tok.getString();
3109
3110  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3111    .Case("sy",    ARM_MB::SY)
3112    .Case("st",    ARM_MB::ST)
3113    .Case("sh",    ARM_MB::ISH)
3114    .Case("ish",   ARM_MB::ISH)
3115    .Case("shst",  ARM_MB::ISHST)
3116    .Case("ishst", ARM_MB::ISHST)
3117    .Case("nsh",   ARM_MB::NSH)
3118    .Case("un",    ARM_MB::NSH)
3119    .Case("nshst", ARM_MB::NSHST)
3120    .Case("unst",  ARM_MB::NSHST)
3121    .Case("osh",   ARM_MB::OSH)
3122    .Case("oshst", ARM_MB::OSHST)
3123    .Default(~0U);
3124
3125  if (Opt == ~0U)
3126    return MatchOperand_NoMatch;
3127
3128  Parser.Lex(); // Eat identifier token.
3129  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3130  return MatchOperand_Success;
3131}
3132
3133/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
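/// For example (assumed), the "if" in "cpsid if"; each letter selects one of
/// the A, I, or F bits, and "none" selects no bits at all.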
3134ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3135parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3136  SMLoc S = Parser.getTok().getLoc();
3137  const AsmToken &Tok = Parser.getTok();
3138  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3139  StringRef IFlagsStr = Tok.getString();
3140
3141  // An iflags string of "none" is interpreted to mean that none of the AIF
3142  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3143  unsigned IFlags = 0;
3144  if (IFlagsStr != "none") {
3145    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3146      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3147        .Case("a", ARM_PROC::A)
3148        .Case("i", ARM_PROC::I)
3149        .Case("f", ARM_PROC::F)
3150        .Default(~0U);
3151
3152      // If some specific iflag is already set, it means that some letter is
3153      // present more than once, which is not acceptable.
3154      if (Flag == ~0U || (IFlags & Flag))
3155        return MatchOperand_NoMatch;
3156
3157      IFlags |= Flag;
3158    }
3159  }
3160
3161  Parser.Lex(); // Eat identifier token.
3162  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3163  return MatchOperand_Success;
3164}
3165
3166/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
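/// For example (assumed), the "cpsr_fc" in "msr cpsr_fc, r0", the "apsr_nzcvq"
/// in "msr apsr_nzcvq, r0", or "primask"/"basepri" on M-class targets.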
3167ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3168parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3169  SMLoc S = Parser.getTok().getLoc();
3170  const AsmToken &Tok = Parser.getTok();
3171  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3172  StringRef Mask = Tok.getString();
3173
3174  if (isMClass()) {
3175    // See ARMv6-M 10.1.1
3176    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3177      .Case("apsr", 0)
3178      .Case("iapsr", 1)
3179      .Case("eapsr", 2)
3180      .Case("xpsr", 3)
3181      .Case("ipsr", 5)
3182      .Case("epsr", 6)
3183      .Case("iepsr", 7)
3184      .Case("msp", 8)
3185      .Case("psp", 9)
3186      .Case("primask", 16)
3187      .Case("basepri", 17)
3188      .Case("basepri_max", 18)
3189      .Case("faultmask", 19)
3190      .Case("control", 20)
3191      .Default(~0U);
3192
3193    if (FlagsVal == ~0U)
3194      return MatchOperand_NoMatch;
3195
3196    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3197      // basepri, basepri_max and faultmask only valid for V7m.
3198      return MatchOperand_NoMatch;
3199
3200    Parser.Lex(); // Eat identifier token.
3201    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3202    return MatchOperand_Success;
3203  }
3204
3205  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3206  size_t Start = 0, Next = Mask.find('_');
3207  StringRef Flags = "";
3208  std::string SpecReg = Mask.slice(Start, Next).lower();
3209  if (Next != StringRef::npos)
3210    Flags = Mask.slice(Next+1, Mask.size());
3211
3212  // FlagsVal contains the complete mask:
3213  // 3-0: Mask
3214  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3215  unsigned FlagsVal = 0;
3216
3217  if (SpecReg == "apsr") {
3218    FlagsVal = StringSwitch<unsigned>(Flags)
3219    .Case("nzcvq",  0x8) // same as CPSR_f
3220    .Case("g",      0x4) // same as CPSR_s
3221    .Case("nzcvqg", 0xc) // same as CPSR_fs
3222    .Default(~0U);
3223
3224    if (FlagsVal == ~0U) {
3225      if (!Flags.empty())
3226        return MatchOperand_NoMatch;
3227      else
3228        FlagsVal = 8; // No flag
3229    }
3230  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3231    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3232      Flags = "fc";
3233    for (int i = 0, e = Flags.size(); i != e; ++i) {
3234      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3235      .Case("c", 1)
3236      .Case("x", 2)
3237      .Case("s", 4)
3238      .Case("f", 8)
3239      .Default(~0U);
3240
3241      // If some specific flag is already set, it means that some letter is
3242      // present more than once, which is not acceptable.
3243      if (FlagsVal == ~0U || (FlagsVal & Flag))
3244        return MatchOperand_NoMatch;
3245      FlagsVal |= Flag;
3246    }
3247  } else // No match for special register.
3248    return MatchOperand_NoMatch;
3249
3250  // Special register without flags is NOT equivalent to "fc" flags.
3251  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3252  // two lines would enable gas compatibility at the expense of breaking
3253  // round-tripping.
3254  //
3255  // if (!FlagsVal)
3256  //  FlagsVal = 0x9;
3257
3258  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3259  if (SpecReg == "spsr")
3260    FlagsVal |= 16;
3261
3262  Parser.Lex(); // Eat identifier token.
3263  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3264  return MatchOperand_Success;
3265}
3266
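/// parsePKHImm - Parse the shift operand of a PKH-family instruction, e.g.
/// (assumed) the "lsl #8" in "pkhbt r0, r1, r2, lsl #8". Op names the expected
/// shift keyword and [Low, High] bounds the shift amount.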
3267ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3268parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3269            int Low, int High) {
3270  const AsmToken &Tok = Parser.getTok();
3271  if (Tok.isNot(AsmToken::Identifier)) {
3272    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3273    return MatchOperand_ParseFail;
3274  }
3275  StringRef ShiftName = Tok.getString();
3276  std::string LowerOp = Op.lower();
3277  std::string UpperOp = Op.upper();
3278  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3279    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3280    return MatchOperand_ParseFail;
3281  }
3282  Parser.Lex(); // Eat shift type token.
3283
3284  // There must be a '#' and a shift amount.
3285  if (Parser.getTok().isNot(AsmToken::Hash) &&
3286      Parser.getTok().isNot(AsmToken::Dollar)) {
3287    Error(Parser.getTok().getLoc(), "'#' expected");
3288    return MatchOperand_ParseFail;
3289  }
3290  Parser.Lex(); // Eat hash token.
3291
3292  const MCExpr *ShiftAmount;
3293  SMLoc Loc = Parser.getTok().getLoc();
3294  if (getParser().ParseExpression(ShiftAmount)) {
3295    Error(Loc, "illegal expression");
3296    return MatchOperand_ParseFail;
3297  }
3298  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3299  if (!CE) {
3300    Error(Loc, "constant expression expected");
3301    return MatchOperand_ParseFail;
3302  }
3303  int Val = CE->getValue();
3304  if (Val < Low || Val > High) {
3305    Error(Loc, "immediate value out of range");
3306    return MatchOperand_ParseFail;
3307  }
3308
3309  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3310
3311  return MatchOperand_Success;
3312}
3313
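/// parseSetEndImm - Parse the endianness operand of SETEND, e.g. (assumed) the
/// "be" in "setend be". "be" encodes as 1 and "le" as 0.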
3314ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3315parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3316  const AsmToken &Tok = Parser.getTok();
3317  SMLoc S = Tok.getLoc();
3318  if (Tok.isNot(AsmToken::Identifier)) {
3319    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3320    return MatchOperand_ParseFail;
3321  }
3322  int Val = StringSwitch<int>(Tok.getString())
3323    .Case("be", 1)
3324    .Case("le", 0)
3325    .Default(-1);
3326  Parser.Lex(); // Eat the token.
3327
3328  if (Val == -1) {
3329    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3330    return MatchOperand_ParseFail;
3331  }
3332  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3333                                                                  getContext()),
3334                                           S, Parser.getTok().getLoc()));
3335  return MatchOperand_Success;
3336}
3337
3338/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3339/// instructions. Legal values are:
3340///     lsl #n  'n' in [0,31]
3341///     asr #n  'n' in [1,32]
3342///             n == 32 encoded as n == 0.
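///     e.g. (assumed) the "lsl #4" in "ssat r0, #8, r1, lsl #4"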
3343ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3344parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3345  const AsmToken &Tok = Parser.getTok();
3346  SMLoc S = Tok.getLoc();
3347  if (Tok.isNot(AsmToken::Identifier)) {
3348    Error(S, "shift operator 'asr' or 'lsl' expected");
3349    return MatchOperand_ParseFail;
3350  }
3351  StringRef ShiftName = Tok.getString();
3352  bool isASR;
3353  if (ShiftName == "lsl" || ShiftName == "LSL")
3354    isASR = false;
3355  else if (ShiftName == "asr" || ShiftName == "ASR")
3356    isASR = true;
3357  else {
3358    Error(S, "shift operator 'asr' or 'lsl' expected");
3359    return MatchOperand_ParseFail;
3360  }
3361  Parser.Lex(); // Eat the operator.
3362
3363  // A '#' and a shift amount.
3364  if (Parser.getTok().isNot(AsmToken::Hash) &&
3365      Parser.getTok().isNot(AsmToken::Dollar)) {
3366    Error(Parser.getTok().getLoc(), "'#' expected");
3367    return MatchOperand_ParseFail;
3368  }
3369  Parser.Lex(); // Eat hash token.
3370
3371  const MCExpr *ShiftAmount;
3372  SMLoc E = Parser.getTok().getLoc();
3373  if (getParser().ParseExpression(ShiftAmount)) {
3374    Error(E, "malformed shift expression");
3375    return MatchOperand_ParseFail;
3376  }
3377  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3378  if (!CE) {
3379    Error(E, "shift amount must be an immediate");
3380    return MatchOperand_ParseFail;
3381  }
3382
3383  int64_t Val = CE->getValue();
3384  if (isASR) {
3385    // Shift amount must be in [1,32]
3386    if (Val < 1 || Val > 32) {
3387      Error(E, "'asr' shift amount must be in range [1,32]");
3388      return MatchOperand_ParseFail;
3389    }
3390    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3391    if (isThumb() && Val == 32) {
3392      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3393      return MatchOperand_ParseFail;
3394    }
3395    if (Val == 32) Val = 0;
3396  } else {
3397    // Shift amount must be in [0,31].
3398    if (Val < 0 || Val > 31) {
3399      Error(E, "'lsl' shift amount must be in range [0,31]");
3400      return MatchOperand_ParseFail;
3401    }
3402  }
3403
3404  E = Parser.getTok().getLoc();
3405  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3406
3407  return MatchOperand_Success;
3408}
3409
3410/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3411/// of instructions. Legal values are:
3412///     ror #n  'n' in {0, 8, 16, 24}
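///     e.g. (assumed) the "ror #8" in "sxtb r0, r1, ror #8"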
3413ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3414parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3415  const AsmToken &Tok = Parser.getTok();
3416  SMLoc S = Tok.getLoc();
3417  if (Tok.isNot(AsmToken::Identifier))
3418    return MatchOperand_NoMatch;
3419  StringRef ShiftName = Tok.getString();
3420  if (ShiftName != "ror" && ShiftName != "ROR")
3421    return MatchOperand_NoMatch;
3422  Parser.Lex(); // Eat the operator.
3423
3424  // A '#' and a rotate amount.
3425  if (Parser.getTok().isNot(AsmToken::Hash) &&
3426      Parser.getTok().isNot(AsmToken::Dollar)) {
3427    Error(Parser.getTok().getLoc(), "'#' expected");
3428    return MatchOperand_ParseFail;
3429  }
3430  Parser.Lex(); // Eat hash token.
3431
3432  const MCExpr *ShiftAmount;
3433  SMLoc E = Parser.getTok().getLoc();
3434  if (getParser().ParseExpression(ShiftAmount)) {
3435    Error(E, "malformed rotate expression");
3436    return MatchOperand_ParseFail;
3437  }
3438  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3439  if (!CE) {
3440    Error(E, "rotate amount must be an immediate");
3441    return MatchOperand_ParseFail;
3442  }
3443
3444  int64_t Val = CE->getValue();
3445  // Rotate amount must be in {0, 8, 16, 24}; 0 is an undocumented extension.
3446  // Normally, zero is represented in asm by omitting the rotate operand
3447  // entirely.
3448  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3449    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3450    return MatchOperand_ParseFail;
3451  }
3452
3453  E = Parser.getTok().getLoc();
3454  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3455
3456  return MatchOperand_Success;
3457}
3458
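/// parseBitfield - Parse the "#lsb, #width" operand pair used by the bitfield
/// instructions, e.g. (assumed) the "#4, #8" in "bfi r0, r1, #4, #8".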
3459ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3460parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3461  SMLoc S = Parser.getTok().getLoc();
3462  // The bitfield descriptor is really two operands, the LSB and the width.
3463  if (Parser.getTok().isNot(AsmToken::Hash) &&
3464      Parser.getTok().isNot(AsmToken::Dollar)) {
3465    Error(Parser.getTok().getLoc(), "'#' expected");
3466    return MatchOperand_ParseFail;
3467  }
3468  Parser.Lex(); // Eat hash token.
3469
3470  const MCExpr *LSBExpr;
3471  SMLoc E = Parser.getTok().getLoc();
3472  if (getParser().ParseExpression(LSBExpr)) {
3473    Error(E, "malformed immediate expression");
3474    return MatchOperand_ParseFail;
3475  }
3476  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3477  if (!CE) {
3478    Error(E, "'lsb' operand must be an immediate");
3479    return MatchOperand_ParseFail;
3480  }
3481
3482  int64_t LSB = CE->getValue();
3483  // The LSB must be in the range [0,31]
3484  if (LSB < 0 || LSB > 31) {
3485    Error(E, "'lsb' operand must be in the range [0,31]");
3486    return MatchOperand_ParseFail;
3487  }
3488  E = Parser.getTok().getLoc();
3489
3490  // Expect another immediate operand.
3491  if (Parser.getTok().isNot(AsmToken::Comma)) {
3492    Error(Parser.getTok().getLoc(), "too few operands");
3493    return MatchOperand_ParseFail;
3494  }
3495  Parser.Lex(); // Eat comma token.
3496  if (Parser.getTok().isNot(AsmToken::Hash) &&
3497      Parser.getTok().isNot(AsmToken::Dollar)) {
3498    Error(Parser.getTok().getLoc(), "'#' expected");
3499    return MatchOperand_ParseFail;
3500  }
3501  Parser.Lex(); // Eat hash token.
3502
3503  const MCExpr *WidthExpr;
3504  if (getParser().ParseExpression(WidthExpr)) {
3505    Error(E, "malformed immediate expression");
3506    return MatchOperand_ParseFail;
3507  }
3508  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3509  if (!CE) {
3510    Error(E, "'width' operand must be an immediate");
3511    return MatchOperand_ParseFail;
3512  }
3513
3514  int64_t Width = CE->getValue();
3515  // The width must be in the range [1,32-lsb]
3516  if (Width < 1 || Width > 32 - LSB) {
3517    Error(E, "'width' operand must be in the range [1,32-lsb]");
3518    return MatchOperand_ParseFail;
3519  }
3520  E = Parser.getTok().getLoc();
3521
3522  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3523
3524  return MatchOperand_Success;
3525}
3526
3527ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3528parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3529  // Check for a post-index addressing register operand. Specifically:
3530  // postidx_reg := '+' register {, shift}
3531  //              | '-' register {, shift}
3532  //              | register {, shift}
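  // e.g. (assumed) the "r2, lsl #2" in "str r0, [r1], r2, lsl #2" or the "-r2"
  // in "ldr r0, [r1], -r2".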
3533
3534  // This method must return MatchOperand_NoMatch without consuming any tokens
3535  // in the case where there is no match, as other alternatives take other
3536  // parse methods.
3537  AsmToken Tok = Parser.getTok();
3538  SMLoc S = Tok.getLoc();
3539  bool haveEaten = false;
3540  bool isAdd = true;
3541  int Reg = -1;
3542  if (Tok.is(AsmToken::Plus)) {
3543    Parser.Lex(); // Eat the '+' token.
3544    haveEaten = true;
3545  } else if (Tok.is(AsmToken::Minus)) {
3546    Parser.Lex(); // Eat the '-' token.
3547    isAdd = false;
3548    haveEaten = true;
3549  }
3550  if (Parser.getTok().is(AsmToken::Identifier))
3551    Reg = tryParseRegister();
3552  if (Reg == -1) {
3553    if (!haveEaten)
3554      return MatchOperand_NoMatch;
3555    Error(Parser.getTok().getLoc(), "register expected");
3556    return MatchOperand_ParseFail;
3557  }
3558  SMLoc E = Parser.getTok().getLoc();
3559
3560  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3561  unsigned ShiftImm = 0;
3562  if (Parser.getTok().is(AsmToken::Comma)) {
3563    Parser.Lex(); // Eat the ','.
3564    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3565      return MatchOperand_ParseFail;
3566  }
3567
3568  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3569                                                  ShiftImm, S, E));
3570
3571  return MatchOperand_Success;
3572}
3573
3574ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3575parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3576  // Check for a post-index addressing register operand. Specifically:
3577  // am3offset := '+' register
3578  //              | '-' register
3579  //              | register
3580  //              | # imm
3581  //              | # + imm
3582  //              | # - imm
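  // e.g. (assumed) the "#8" in "ldrd r0, r1, [r2], #8" or the "r3" in
  // "strh r0, [r1], r3".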
3583
3584  // This method must return MatchOperand_NoMatch without consuming any tokens
3585  // in the case where there is no match, as other alternatives take other
3586  // parse methods.
3587  AsmToken Tok = Parser.getTok();
3588  SMLoc S = Tok.getLoc();
3589
3590  // Do immediates first, as we always parse those if we have a '#'.
3591  if (Parser.getTok().is(AsmToken::Hash) ||
3592      Parser.getTok().is(AsmToken::Dollar)) {
3593    Parser.Lex(); // Eat the '#'.
3594    // Explicitly look for a '-', as we need to encode negative zero
3595    // differently.
3596    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3597    const MCExpr *Offset;
3598    if (getParser().ParseExpression(Offset))
3599      return MatchOperand_ParseFail;
3600    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3601    if (!CE) {
3602      Error(S, "constant expression expected");
3603      return MatchOperand_ParseFail;
3604    }
3605    SMLoc E = Tok.getLoc();
3606    // Negative zero is encoded as the flag value INT32_MIN.
3607    int32_t Val = CE->getValue();
3608    if (isNegative && Val == 0)
3609      Val = INT32_MIN;
3610
3611    Operands.push_back(
3612      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3613
3614    return MatchOperand_Success;
3615  }
3616
3617
3618  bool haveEaten = false;
3619  bool isAdd = true;
3620  int Reg = -1;
3621  if (Tok.is(AsmToken::Plus)) {
3622    Parser.Lex(); // Eat the '+' token.
3623    haveEaten = true;
3624  } else if (Tok.is(AsmToken::Minus)) {
3625    Parser.Lex(); // Eat the '-' token.
3626    isAdd = false;
3627    haveEaten = true;
3628  }
3629  if (Parser.getTok().is(AsmToken::Identifier))
3630    Reg = tryParseRegister();
3631  if (Reg == -1) {
3632    if (!haveEaten)
3633      return MatchOperand_NoMatch;
3634    Error(Parser.getTok().getLoc(), "register expected");
3635    return MatchOperand_ParseFail;
3636  }
3637  SMLoc E = Parser.getTok().getLoc();
3638
3639  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3640                                                  0, S, E));
3641
3642  return MatchOperand_Success;
3643}
3644
3645/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3646/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3647/// when they refer to multiple MIOperands inside a single one.
3648bool ARMAsmParser::
3649cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3650             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3651  // Rt, Rt2
3652  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3653  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3654  // Create a writeback register dummy placeholder.
3655  Inst.addOperand(MCOperand::CreateReg(0));
3656  // addr
3657  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3658  // pred
3659  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3660  return true;
3661}
3662
3663/// cvtT2StrdPre - Convert parsed operands to MCInst.
3664/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3665/// when they refer to multiple MIOperands inside a single one.
3666bool ARMAsmParser::
3667cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3668             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3669  // Create a writeback register dummy placeholder.
3670  Inst.addOperand(MCOperand::CreateReg(0));
3671  // Rt, Rt2
3672  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3673  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3674  // addr
3675  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3676  // pred
3677  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3678  return true;
3679}
3680
3681/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3682/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3683/// when they refer to multiple MIOperands inside a single one.
3684bool ARMAsmParser::
3685cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3686                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3687  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3688
3689  // Create a writeback register dummy placeholder.
3690  Inst.addOperand(MCOperand::CreateImm(0));
3691
3692  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3693  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3694  return true;
3695}
3696
3697/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3698/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3699/// when they refer to multiple MIOperands inside a single one.
3700bool ARMAsmParser::
3701cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3702                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3703  // Create a writeback register dummy placeholder.
3704  Inst.addOperand(MCOperand::CreateImm(0));
3705  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3706  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3707  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3708  return true;
3709}
3710
3711/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3712/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3713/// when they refer to multiple MIOperands inside a single one.
3714bool ARMAsmParser::
3715cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3716                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3717  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3718
3719  // Create a writeback register dummy placeholder.
3720  Inst.addOperand(MCOperand::CreateImm(0));
3721
3722  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3723  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3724  return true;
3725}
3726
3727/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3728/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3729/// when they refer to multiple MIOperands inside a single one.
3730bool ARMAsmParser::
3731cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3732                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3733  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3734
3735  // Create a writeback register dummy placeholder.
3736  Inst.addOperand(MCOperand::CreateImm(0));
3737
3738  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3739  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3740  return true;
3741}
3742
3743
3744/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3745/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3746/// when they refer to multiple MIOperands inside a single one.
3747bool ARMAsmParser::
3748cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3749                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3750  // Create a writeback register dummy placeholder.
3751  Inst.addOperand(MCOperand::CreateImm(0));
3752  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3753  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3754  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3755  return true;
3756}
3757
3758/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3759/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3760/// when they refer to multiple MIOperands inside a single one.
3761bool ARMAsmParser::
3762cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3763                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3764  // Create a writeback register dummy placeholder.
3765  Inst.addOperand(MCOperand::CreateImm(0));
3766  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3767  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3768  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3769  return true;
3770}
3771
3772/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3773/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3774/// when they refer to multiple MIOperands inside a single one.
3775bool ARMAsmParser::
3776cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3777                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3778  // Create a writeback register dummy placeholder.
3779  Inst.addOperand(MCOperand::CreateImm(0));
3780  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3781  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3782  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3783  return true;
3784}
3785
3786/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3787/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3788/// when they refer to multiple MIOperands inside a single one.
3789bool ARMAsmParser::
3790cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3791                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3792  // Rt
3793  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3794  // Create a writeback register dummy placeholder.
3795  Inst.addOperand(MCOperand::CreateImm(0));
3796  // addr
3797  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3798  // offset
3799  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3800  // pred
3801  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3802  return true;
3803}
3804
3805/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3806/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3807/// when they refer to multiple MIOperands inside a single one.
3808bool ARMAsmParser::
3809cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3810                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3811  // Rt
3812  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3813  // Create a writeback register dummy placeholder.
3814  Inst.addOperand(MCOperand::CreateImm(0));
3815  // addr
3816  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3817  // offset
3818  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3819  // pred
3820  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3821  return true;
3822}
3823
3824/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3825/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3826/// when they refer to multiple MIOperands inside a single one.
3827bool ARMAsmParser::
3828cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3829                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3830  // Create a writeback register dummy placeholder.
3831  Inst.addOperand(MCOperand::CreateImm(0));
3832  // Rt
3833  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3834  // addr
3835  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3836  // offset
3837  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3838  // pred
3839  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3840  return true;
3841}
3842
3843/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3844/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3845/// when they refer to multiple MIOperands inside a single one.
3846bool ARMAsmParser::
3847cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3848                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3849  // Create a writeback register dummy placeholder.
3850  Inst.addOperand(MCOperand::CreateImm(0));
3851  // Rt
3852  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3853  // addr
3854  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3855  // offset
3856  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3857  // pred
3858  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3859  return true;
3860}
3861
3862/// cvtLdrdPre - Convert parsed operands to MCInst.
3863/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3864/// when they refer to multiple MIOperands inside a single one.
3865bool ARMAsmParser::
3866cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3867           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3868  // Rt, Rt2
3869  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3870  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3871  // Create a writeback register dummy placeholder.
3872  Inst.addOperand(MCOperand::CreateImm(0));
3873  // addr
3874  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3875  // pred
3876  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3877  return true;
3878}
3879
3880/// cvtStrdPre - Convert parsed operands to MCInst.
3881/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3882/// when they refer to multiple MIOperands inside a single one.
3883bool ARMAsmParser::
3884cvtStrdPre(MCInst &Inst, unsigned Opcode,
3885           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3886  // Create a writeback register dummy placeholder.
3887  Inst.addOperand(MCOperand::CreateImm(0));
3888  // Rt, Rt2
3889  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3890  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3891  // addr
3892  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3893  // pred
3894  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3895  return true;
3896}
3897
3898/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3899/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3900/// when they refer to multiple MIOperands inside a single one.
3901bool ARMAsmParser::
3902cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3903                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3904  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3905  // Create a writeback register dummy placeholder.
3906  Inst.addOperand(MCOperand::CreateImm(0));
3907  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3908  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3909  return true;
3910}
3911
3912/// cvtThumbMultiply - Convert parsed operands to MCInst.
3913/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3914/// when they refer to multiple MIOperands inside a single one.
3915bool ARMAsmParser::
3916cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3917           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3918  // For the three-operand form, the destination register must be the same
3919  // as one of the source registers.
3920  if (Operands.size() == 6 &&
3921      (((ARMOperand*)Operands[3])->getReg() !=
3922       ((ARMOperand*)Operands[5])->getReg()) &&
3923      (((ARMOperand*)Operands[3])->getReg() !=
3924       ((ARMOperand*)Operands[4])->getReg())) {
3925    Error(Operands[3]->getStartLoc(),
3926          "destination register must match source register");
3927    return false;
3928  }
3929  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3930  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3931  // If we have a three-operand form, make sure to set Rn to be the operand
3932  // that isn't the same as Rd.
3933  unsigned RegOp = 4;
3934  if (Operands.size() == 6 &&
3935      ((ARMOperand*)Operands[4])->getReg() ==
3936        ((ARMOperand*)Operands[3])->getReg())
3937    RegOp = 5;
3938  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3939  Inst.addOperand(Inst.getOperand(0));
3940  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3941
3942  return true;
3943}
3944
3945bool ARMAsmParser::
3946cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3947              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3948  // Vd
3949  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3950  // Create a writeback register dummy placeholder.
3951  Inst.addOperand(MCOperand::CreateImm(0));
3952  // Vn
3953  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3954  // pred
3955  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3956  return true;
3957}
3958
3959bool ARMAsmParser::
3960cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3961                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3962  // Vd
3963  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3964  // Create a writeback register dummy placeholder.
3965  Inst.addOperand(MCOperand::CreateImm(0));
3966  // Vn
3967  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3968  // Vm
3969  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3970  // pred
3971  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3972  return true;
3973}
3974
3975bool ARMAsmParser::
3976cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3977              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3978  // Create a writeback register dummy placeholder.
3979  Inst.addOperand(MCOperand::CreateImm(0));
3980  // Vn
3981  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3982  // Vt
3983  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3984  // pred
3985  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3986  return true;
3987}
3988
3989bool ARMAsmParser::
3990cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3991                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3992  // Create a writeback register dummy placeholder.
3993  Inst.addOperand(MCOperand::CreateImm(0));
3994  // Vn
3995  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3996  // Vm
3997  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3998  // Vt
3999  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4000  // pred
4001  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4002  return true;
4003}
4004
4005/// Parse an ARM memory expression. Return false on success; otherwise emit an
4006/// error and return true. The first token must be a '[' when called.
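/// Illustrative (assumed) forms: "[r0]", "[r0, #4]", "[r0, -r1, lsl #2]", and an
/// alignment-qualified form such as "[r0, :128]".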
4007bool ARMAsmParser::
4008parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4009  SMLoc S, E;
4010  assert(Parser.getTok().is(AsmToken::LBrac) &&
4011         "Token is not a Left Bracket");
4012  S = Parser.getTok().getLoc();
4013  Parser.Lex(); // Eat left bracket token.
4014
4015  const AsmToken &BaseRegTok = Parser.getTok();
4016  int BaseRegNum = tryParseRegister();
4017  if (BaseRegNum == -1)
4018    return Error(BaseRegTok.getLoc(), "register expected");
4019
4020  // The next token must either be a comma or a closing bracket.
4021  const AsmToken &Tok = Parser.getTok();
4022  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4023    return Error(Tok.getLoc(), "malformed memory operand");
4024
4025  if (Tok.is(AsmToken::RBrac)) {
4026    E = Tok.getLoc();
4027    Parser.Lex(); // Eat right bracket token.
4028
4029    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4030                                             0, 0, false, S, E));
4031
4032    // If there's a pre-indexing writeback marker, '!', just add it as a token
4033    // operand. It's rather odd, but syntactically valid.
4034    if (Parser.getTok().is(AsmToken::Exclaim)) {
4035      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4036      Parser.Lex(); // Eat the '!'.
4037    }
4038
4039    return false;
4040  }
4041
4042  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4043  Parser.Lex(); // Eat the comma.
4044
4045  // If we have a ':', it's an alignment specifier.
4046  if (Parser.getTok().is(AsmToken::Colon)) {
4047    Parser.Lex(); // Eat the ':'.
4048    E = Parser.getTok().getLoc();
4049
4050    const MCExpr *Expr;
4051    if (getParser().ParseExpression(Expr))
4052     return true;
4053
4054    // The expression has to be a constant. Memory references with relocations
4055    // don't come through here, as they use the <label> forms of the relevant
4056    // instructions.
4057    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4058    if (!CE)
4059      return Error (E, "constant expression expected");
4060
4061    unsigned Align = 0;
4062    switch (CE->getValue()) {
4063    default:
4064      return Error(E,
4065                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4066    case 16:  Align = 2; break;
4067    case 32:  Align = 4; break;
4068    case 64:  Align = 8; break;
4069    case 128: Align = 16; break;
4070    case 256: Align = 32; break;
4071    }
4072
4073    // Now we should have the closing ']'
4074    E = Parser.getTok().getLoc();
4075    if (Parser.getTok().isNot(AsmToken::RBrac))
4076      return Error(E, "']' expected");
4077    Parser.Lex(); // Eat right bracket token.
4078
4079    // Don't worry about range checking the value here. That's handled by
4080    // the is*() predicates.
4081    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4082                                             ARM_AM::no_shift, 0, Align,
4083                                             false, S, E));
4084
4085    // If there's a pre-indexing writeback marker, '!', just add it as a token
4086    // operand.
4087    if (Parser.getTok().is(AsmToken::Exclaim)) {
4088      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4089      Parser.Lex(); // Eat the '!'.
4090    }
4091
4092    return false;
4093  }
4094
4095  // If we have a '#', it's an immediate offset, else assume it's a register
4096  // offset. Be friendly and also accept a plain integer (without a leading
4097  // hash) for gas compatibility.
4098  if (Parser.getTok().is(AsmToken::Hash) ||
4099      Parser.getTok().is(AsmToken::Dollar) ||
4100      Parser.getTok().is(AsmToken::Integer)) {
4101    if (Parser.getTok().isNot(AsmToken::Integer))
4102      Parser.Lex(); // Eat the '#'.
4103    E = Parser.getTok().getLoc();
4104
4105    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4106    const MCExpr *Offset;
4107    if (getParser().ParseExpression(Offset))
4108     return true;
4109
4110    // The expression has to be a constant. Memory references with relocations
4111    // don't come through here, as they use the <label> forms of the relevant
4112    // instructions.
4113    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4114    if (!CE)
4115      return Error (E, "constant expression expected");
4116
4117    // If the constant was #-0, represent it as INT32_MIN.
4118    int32_t Val = CE->getValue();
4119    if (isNegative && Val == 0)
4120      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4121
4122    // Now we should have the closing ']'
4123    E = Parser.getTok().getLoc();
4124    if (Parser.getTok().isNot(AsmToken::RBrac))
4125      return Error(E, "']' expected");
4126    Parser.Lex(); // Eat right bracket token.
4127
4128    // Don't worry about range checking the value here. That's handled by
4129    // the is*() predicates.
4130    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4131                                             ARM_AM::no_shift, 0, 0,
4132                                             false, S, E));
4133
4134    // If there's a pre-indexing writeback marker, '!', just add it as a token
4135    // operand.
4136    if (Parser.getTok().is(AsmToken::Exclaim)) {
4137      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4138      Parser.Lex(); // Eat the '!'.
4139    }
4140
4141    return false;
4142  }
4143
4144  // The register offset is optionally preceded by a '+' or '-'
4145  bool isNegative = false;
4146  if (Parser.getTok().is(AsmToken::Minus)) {
4147    isNegative = true;
4148    Parser.Lex(); // Eat the '-'.
4149  } else if (Parser.getTok().is(AsmToken::Plus)) {
4150    // Nothing to do.
4151    Parser.Lex(); // Eat the '+'.
4152  }
4153
4154  E = Parser.getTok().getLoc();
4155  int OffsetRegNum = tryParseRegister();
4156  if (OffsetRegNum == -1)
4157    return Error(E, "register expected");
4158
4159  // If there's a shift operator, handle it.
4160  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4161  unsigned ShiftImm = 0;
4162  if (Parser.getTok().is(AsmToken::Comma)) {
4163    Parser.Lex(); // Eat the ','.
4164    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4165      return true;
4166  }
4167
4168  // Now we should have the closing ']'
4169  E = Parser.getTok().getLoc();
4170  if (Parser.getTok().isNot(AsmToken::RBrac))
4171    return Error(E, "']' expected");
4172  Parser.Lex(); // Eat right bracket token.
4173
4174  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4175                                           ShiftType, ShiftImm, 0, isNegative,
4176                                           S, E));
4177
4178  // If there's a pre-indexing writeback marker, '!', just add it as a token
4179  // operand.
4180  if (Parser.getTok().is(AsmToken::Exclaim)) {
4181    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4182    Parser.Lex(); // Eat the '!'.
4183  }
4184
4185  return false;
4186}
4187
4188/// parseMemRegOffsetShift - one of these two:
4189///   ( lsl | lsr | asr | ror ) , # shift_amount
4190///   rrx
4191/// Returns false if a shift was parsed successfully; otherwise returns true.
4192bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4193                                          unsigned &Amount) {
4194  SMLoc Loc = Parser.getTok().getLoc();
4195  const AsmToken &Tok = Parser.getTok();
4196  if (Tok.isNot(AsmToken::Identifier))
4197    return true;
4198  StringRef ShiftName = Tok.getString();
4199  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4200      ShiftName == "asl" || ShiftName == "ASL")
4201    St = ARM_AM::lsl;
4202  else if (ShiftName == "lsr" || ShiftName == "LSR")
4203    St = ARM_AM::lsr;
4204  else if (ShiftName == "asr" || ShiftName == "ASR")
4205    St = ARM_AM::asr;
4206  else if (ShiftName == "ror" || ShiftName == "ROR")
4207    St = ARM_AM::ror;
4208  else if (ShiftName == "rrx" || ShiftName == "RRX")
4209    St = ARM_AM::rrx;
4210  else
4211    return Error(Loc, "illegal shift operator");
4212  Parser.Lex(); // Eat shift type token.
4213
4214  // rrx stands alone.
4215  Amount = 0;
4216  if (St != ARM_AM::rrx) {
4217    Loc = Parser.getTok().getLoc();
4218    // A '#' and a shift amount.
4219    const AsmToken &HashTok = Parser.getTok();
4220    if (HashTok.isNot(AsmToken::Hash) &&
4221        HashTok.isNot(AsmToken::Dollar))
4222      return Error(HashTok.getLoc(), "'#' expected");
4223    Parser.Lex(); // Eat hash token.
4224
4225    const MCExpr *Expr;
4226    if (getParser().ParseExpression(Expr))
4227      return true;
4228    // Range check the immediate.
4229    // lsl, ror: 0 <= imm <= 31
4230    // lsr, asr: 0 <= imm <= 32
4231    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4232    if (!CE)
4233      return Error(Loc, "shift amount must be an immediate");
4234    int64_t Imm = CE->getValue();
4235    if (Imm < 0 ||
4236        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4237        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4238      return Error(Loc, "immediate shift value out of range");
4239    Amount = Imm;
4240  }
4241
4242  return false;
4243}
4244
4245/// parseFPImm - Parse a floating point immediate operand.
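/// For example (assumed), the "#0.5" in "vmov.f32 s0, #0.5", or an already
/// encoded 8-bit value such as "#112".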
4246ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4247parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4248  SMLoc S = Parser.getTok().getLoc();
4249
4250  if (Parser.getTok().isNot(AsmToken::Hash) &&
4251      Parser.getTok().isNot(AsmToken::Dollar))
4252    return MatchOperand_NoMatch;
4253
4254  // Disambiguate the VMOV forms that can accept an FP immediate.
4255  // vmov.f32 <sreg>, #imm
4256  // vmov.f64 <dreg>, #imm
4257  // vmov.f32 <dreg>, #imm  @ vector f32x2
4258  // vmov.f32 <qreg>, #imm  @ vector f32x4
4259  //
4260  // There are also the NEON VMOV instructions which expect an
4261  // integer constant. Make sure we don't try to parse an FPImm
4262  // for these:
4263  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4264  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4265  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4266                           TyOp->getToken() != ".f64"))
4267    return MatchOperand_NoMatch;
4268
4269  Parser.Lex(); // Eat the '#'.
4270
4271  // Handle negation, as that still comes through as a separate token.
4272  bool isNegative = false;
4273  if (Parser.getTok().is(AsmToken::Minus)) {
4274    isNegative = true;
4275    Parser.Lex();
4276  }
4277  const AsmToken &Tok = Parser.getTok();
4278  if (Tok.is(AsmToken::Real)) {
4279    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4280    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4281    // If we had a '-' in front, toggle the sign bit.
4282    IntVal ^= (uint64_t)isNegative << 63;
4283    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4284    Parser.Lex(); // Eat the token.
4285    if (Val == -1) {
4286      TokError("floating point value out of range");
4287      return MatchOperand_ParseFail;
4288    }
4289    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4290    return MatchOperand_Success;
4291  }
4292  if (Tok.is(AsmToken::Integer)) {
4293    int64_t Val = Tok.getIntVal();
4294    Parser.Lex(); // Eat the token.
4295    if (Val > 255 || Val < 0) {
4296      TokError("encoded floating point value out of range");
4297      return MatchOperand_ParseFail;
4298    }
4299    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4300    return MatchOperand_Success;
4301  }
4302
4303  TokError("invalid floating point immediate");
4304  return MatchOperand_ParseFail;
4305}
4306/// Parse an ARM instruction operand.  For now this parses the operand regardless
4307/// of the mnemonic.
4308bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4309                                StringRef Mnemonic) {
4310  SMLoc S, E;
4311
4312  // Check if the current operand has a custom associated parser, if so, try to
4313  // custom parse the operand, or fallback to the general approach.
4314  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4315  if (ResTy == MatchOperand_Success)
4316    return false;
4317  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4318  // there was a match, but an error occurred, in which case, just return that
4319  // the operand parsing failed.
4320  if (ResTy == MatchOperand_ParseFail)
4321    return true;
4322
4323  switch (getLexer().getKind()) {
4324  default:
4325    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4326    return true;
4327  case AsmToken::Identifier: {
4328    if (!tryParseRegisterWithWriteBack(Operands))
4329      return false;
4330    int Res = tryParseShiftRegister(Operands);
4331    if (Res == 0) // success
4332      return false;
4333    else if (Res == -1) // irrecoverable error
4334      return true;
4335    // If this is VMRS, check for the apsr_nzcv operand.
4336    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4337      S = Parser.getTok().getLoc();
4338      Parser.Lex();
4339      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4340      return false;
4341    }
4342
4343    // Fall through for the Identifier case that is not a register or a
4344    // special name.
4345  }
4346  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4347  case AsmToken::Integer: // things like 1f and 2b as branch targets
4348  case AsmToken::String:  // quoted label names.
4349  case AsmToken::Dot: {   // . as a branch target
4350    // This was not a register so parse other operands that start with an
4351    // identifier (like labels) as expressions and create them as immediates.
4352    const MCExpr *IdVal;
4353    S = Parser.getTok().getLoc();
4354    if (getParser().ParseExpression(IdVal))
4355      return true;
4356    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4357    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4358    return false;
4359  }
4360  case AsmToken::LBrac:
4361    return parseMemory(Operands);
4362  case AsmToken::LCurly:
4363    return parseRegisterList(Operands);
4364  case AsmToken::Dollar:
4365  case AsmToken::Hash: {
4366    // #42 -> immediate.
4367    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4368    S = Parser.getTok().getLoc();
4369    Parser.Lex();
4370    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4371    const MCExpr *ImmVal;
4372    if (getParser().ParseExpression(ImmVal))
4373      return true;
4374    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4375    if (CE) {
4376      int32_t Val = CE->getValue();
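      // '#-0' is not the same as '#0' for some addressing modes, so remember
      // the negation with an INT32_MIN sentinel that later handling recognizes.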
4377      if (isNegative && Val == 0)
4378        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4379    }
4380    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4381    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4382    return false;
4383  }
4384  case AsmToken::Colon: {
4385    // ":lower16:" and ":upper16:" expression prefixes
4386    // FIXME: Check it's an expression prefix,
4387    // e.g. (FOO - :lower16:BAR) isn't legal.
4388    ARMMCExpr::VariantKind RefKind;
4389    if (parsePrefix(RefKind))
4390      return true;
4391
4392    const MCExpr *SubExprVal;
4393    if (getParser().ParseExpression(SubExprVal))
4394      return true;
4395
4396    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4397                                                   getContext());
4398    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4399    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4400    return false;
4401  }
4402  }
4403}
4404
4405// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4406//  :lower16: and :upper16:.
4407bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4408  RefKind = ARMMCExpr::VK_ARM_None;
4409
4410  // :lower16: and :upper16: modifiers
4411  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4412  Parser.Lex(); // Eat ':'
4413
4414  if (getLexer().isNot(AsmToken::Identifier)) {
4415    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4416    return true;
4417  }
4418
4419  StringRef IDVal = Parser.getTok().getIdentifier();
4420  if (IDVal == "lower16") {
4421    RefKind = ARMMCExpr::VK_ARM_LO16;
4422  } else if (IDVal == "upper16") {
4423    RefKind = ARMMCExpr::VK_ARM_HI16;
4424  } else {
4425    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4426    return true;
4427  }
4428  Parser.Lex();
4429
4430  if (getLexer().isNot(AsmToken::Colon)) {
4431    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4432    return true;
4433  }
4434  Parser.Lex(); // Eat the last ':'
4435  return false;
4436}
4437
4438/// \brief Given a mnemonic, split out possible predication code and carry
4439/// setting letters to form a canonical mnemonic and flags.
4440//
4441// FIXME: Would be nice to autogen this.
4442// FIXME: This is a bit of a maze of special cases.
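// e.g. "addseq" is split into "add" with CarrySetting set and a predication
// code of ARMCC::EQ.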
4443StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4444                                      unsigned &PredicationCode,
4445                                      bool &CarrySetting,
4446                                      unsigned &ProcessorIMod,
4447                                      StringRef &ITMask) {
4448  PredicationCode = ARMCC::AL;
4449  CarrySetting = false;
4450  ProcessorIMod = 0;
4451
4452  // Ignore some mnemonics we know aren't predicated forms.
4453  //
4454  // FIXME: Would be nice to autogen this.
4455  if ((Mnemonic == "movs" && isThumb()) ||
4456      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4457      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4458      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4459      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4460      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4461      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4462      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4463      Mnemonic == "fmuls")
4464    return Mnemonic;
4465
4466  // First, split out any predication code. Ignore carry-setting mnemonics that
4467  // aren't predicated but whose endings look like a condition code ("bics", "muls").
4468  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4469      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4470      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4471      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4472    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4473      .Case("eq", ARMCC::EQ)
4474      .Case("ne", ARMCC::NE)
4475      .Case("hs", ARMCC::HS)
4476      .Case("cs", ARMCC::HS)
4477      .Case("lo", ARMCC::LO)
4478      .Case("cc", ARMCC::LO)
4479      .Case("mi", ARMCC::MI)
4480      .Case("pl", ARMCC::PL)
4481      .Case("vs", ARMCC::VS)
4482      .Case("vc", ARMCC::VC)
4483      .Case("hi", ARMCC::HI)
4484      .Case("ls", ARMCC::LS)
4485      .Case("ge", ARMCC::GE)
4486      .Case("lt", ARMCC::LT)
4487      .Case("gt", ARMCC::GT)
4488      .Case("le", ARMCC::LE)
4489      .Case("al", ARMCC::AL)
4490      .Default(~0U);
4491    if (CC != ~0U) {
4492      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4493      PredicationCode = CC;
4494    }
4495  }
4496
4497  // Next, determine if we have a carry setting bit. We explicitly ignore all
4498  // the instructions we know end in 's'.
4499  if (Mnemonic.endswith("s") &&
4500      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4501        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4502        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4503        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4504        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4505        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4506        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4507        Mnemonic == "fmuls" ||
4508        (Mnemonic == "movs" && isThumb()))) {
4509    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4510    CarrySetting = true;
4511  }
4512
4513  // The "cps" instruction can have a interrupt mode operand which is glued into
4514  // the mnemonic. Check if this is the case, split it and parse the imod op
4515  if (Mnemonic.startswith("cps")) {
4516    // Split out any imod code.
4517    unsigned IMod =
4518      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4519      .Case("ie", ARM_PROC::IE)
4520      .Case("id", ARM_PROC::ID)
4521      .Default(~0U);
4522    if (IMod != ~0U) {
4523      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4524      ProcessorIMod = IMod;
4525    }
4526  }
4527
4528  // The "it" instruction has the condition mask on the end of the mnemonic.
4529  if (Mnemonic.startswith("it")) {
4530    ITMask = Mnemonic.slice(2, Mnemonic.size());
4531    Mnemonic = Mnemonic.slice(0, 2);
4532  }
4533
4534  return Mnemonic;
4535}
4536
4537/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4538/// inclusion of carry set or predication code operands.
4539//
4540// FIXME: It would be nice to autogen this.
4541void ARMAsmParser::
4542getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4543                      bool &CanAcceptPredicationCode) {
4544  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4545      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4546      Mnemonic == "add" || Mnemonic == "adc" ||
4547      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4548      Mnemonic == "orr" || Mnemonic == "mvn" ||
4549      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4550      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4551      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4552                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4553                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4554    CanAcceptCarrySet = true;
4555  } else
4556    CanAcceptCarrySet = false;
4557
4558  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4559      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4560      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4561      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4562      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4563      (Mnemonic == "clrex" && !isThumb()) ||
4564      (Mnemonic == "nop" && isThumbOne()) ||
4565      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4566        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4567        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4568      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4569       !isThumb()) ||
4570      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4571    CanAcceptPredicationCode = false;
4572  } else
4573    CanAcceptPredicationCode = true;
4574
4575  if (isThumb()) {
4576    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4577        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4578      CanAcceptPredicationCode = false;
4579  }
4580}
4581
4582bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4583                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4584  // FIXME: This is all horribly hacky. We really need a better way to deal
4585  // with optional operands like this in the matcher table.
4586
4587  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4588  // another does not. Specifically, the MOVW instruction does not. So we
4589  // special case it here and remove the defaulted (non-setting) cc_out
4590  // operand if that's the instruction we're trying to match.
4591  //
4592  // We do this as post-processing of the explicit operands rather than just
4593  // conditionally adding the cc_out in the first place because we need
4594  // to check the type of the parsed immediate operand.
4595  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4596      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4597      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4598      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4599    return true;
4600
4601  // Register-register 'add' for thumb does not have a cc_out operand
4602  // when there are only two register operands.
4603  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4604      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4605      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4606      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4607    return true;
4608  // Register-register 'add' for thumb does not have a cc_out operand
4609  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4610  // have to check the immediate range here since Thumb2 has a variant
4611  // that can handle a different range and has a cc_out operand.
4612  if (((isThumb() && Mnemonic == "add") ||
4613       (isThumbTwo() && Mnemonic == "sub")) &&
4614      Operands.size() == 6 &&
4615      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4616      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4617      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4618      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4619      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4620       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4621    return true;
4622  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4623  // imm0_4095 variant. That's the least-preferred variant when
4624  // selecting via the generic "add" mnemonic, so to know that we
4625  // should remove the cc_out operand, we have to explicitly check that
4626  // it's not one of the other variants. Ugh.
4627  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4628      Operands.size() == 6 &&
4629      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4630      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4631      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4632    // Nest conditions rather than one big 'if' statement for readability.
4633    //
4634    // If either register is a high reg, it's either one of the SP
4635    // variants (handled above) or a 32-bit encoding, so we just
4636    // check against T3.
4637    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4638         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4639        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4640      return false;
4641    // If both registers are low, we're in an IT block, and the immediate is
4642    // in range, we should use encoding T1 instead, which has a cc_out.
4643    if (inITBlock() &&
4644        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4645        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4646        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4647      return false;
4648
4649    // Otherwise, we use encoding T4, which does not have a cc_out
4650    // operand.
4651    return true;
4652  }
4653
4654  // The thumb2 multiply instruction doesn't have a CCOut register, so
4655  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4656  // use the 16-bit encoding or not.
4657  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4658      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4659      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4660      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4661      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4662      // If the registers aren't low regs, the destination reg isn't the
4663      // same as one of the source regs, or the cc_out operand is zero
4664      // outside of an IT block, we have to use the 32-bit encoding, so
4665      // remove the cc_out operand.
4666      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4667       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4668       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4669       !inITBlock() ||
4670       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4671        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4672        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4673        static_cast<ARMOperand*>(Operands[4])->getReg())))
4674    return true;
4675
4676  // Also check the 'mul' syntax variant that doesn't specify an explicit
4677  // destination register.
4678  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4679      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4680      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4681      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4682      // If the registers aren't low regs or the cc_out operand is zero
4683      // outside of an IT block, we have to use the 32-bit encoding, so
4684      // remove the cc_out operand.
4685      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4686       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4687       !inITBlock()))
4688    return true;
4689
4692  // Register-register 'add/sub' for thumb does not have a cc_out operand
4693  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4694  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4695  // right, this will result in better diagnostics (which operand is off)
4696  // anyway.
4697  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4698      (Operands.size() == 5 || Operands.size() == 6) &&
4699      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4700      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4701      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4702    return true;
4703
4704  return false;
4705}
4706
4707static bool isDataTypeToken(StringRef Tok) {
4708  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4709    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4710    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4711    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4712    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4713    Tok == ".f" || Tok == ".d";
4714}
4715
4716// FIXME: This bit should probably be handled via an explicit match class
4717// in the .td files that matches the suffix instead of having it be
4718// a literal string token the way it is now.
4719static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4720  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4721}
4722
4723static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4724/// Parse an ARM instruction mnemonic followed by its operands.
4725bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4726                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4727  // Apply mnemonic aliases before doing anything else, as the destination
4728  // mnemonic may include suffixes and we want to handle them normally.
4729  // The generic tblgen'erated code does this later, at the start of
4730  // MatchInstructionImpl(), but that's too late for aliases that include
4731  // any sort of suffix.
4732  unsigned AvailableFeatures = getAvailableFeatures();
4733  applyMnemonicAliases(Name, AvailableFeatures);
4734
4735  // First check for the ARM-specific .req directive.
4736  if (Parser.getTok().is(AsmToken::Identifier) &&
4737      Parser.getTok().getIdentifier() == ".req") {
4738    parseDirectiveReq(Name, NameLoc);
4739    // We always return 'error' for this, as we're done with this
4740    // statement and don't need to match the instruction.
4741    return true;
4742  }
4743
4744  // Create the leading tokens for the mnemonic, split by '.' characters.
4745  size_t Start = 0, Next = Name.find('.');
4746  StringRef Mnemonic = Name.slice(Start, Next);
4747
4748  // Split out the predication code and carry setting flag from the mnemonic.
4749  unsigned PredicationCode;
4750  unsigned ProcessorIMod;
4751  bool CarrySetting;
4752  StringRef ITMask;
4753  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4754                           ProcessorIMod, ITMask);
4755
4756  // In Thumb1, only the branch (B) instruction can be predicated.
4757  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4758    Parser.EatToEndOfStatement();
4759    return Error(NameLoc, "conditional execution not supported in Thumb1");
4760  }
4761
4762  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4763
4764  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4765  // is the mask as it will be for the IT encoding if the conditional
4766  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4767  // where the conditional bit0 is zero, the instruction post-processing
4768  // will adjust the mask accordingly.
4769  if (Mnemonic == "it") {
4770    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4771    if (ITMask.size() > 3) {
4772      Parser.EatToEndOfStatement();
4773      return Error(Loc, "too many conditions on IT instruction");
4774    }
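    // e.g. "itte" (ITMask == "te") yields Mask == 0b1010 here; the
    // post-processing mentioned above adjusts the low bits if the condition's
    // bit0 is 0.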
4775    unsigned Mask = 8;
4776    for (unsigned i = ITMask.size(); i != 0; --i) {
4777      char pos = ITMask[i - 1];
4778      if (pos != 't' && pos != 'e') {
4779        Parser.EatToEndOfStatement();
4780        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4781      }
4782      Mask >>= 1;
4783      if (ITMask[i - 1] == 't')
4784        Mask |= 8;
4785    }
4786    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4787  }
4788
4789  // FIXME: This is all a pretty gross hack. We should automatically handle
4790  // optional operands like this via tblgen.
4791
4792  // Next, add the CCOut and ConditionCode operands, if needed.
4793  //
4794  // For mnemonics which can ever incorporate a carry setting bit or predication
4795  // code, our matching model involves us always generating CCOut and
4796  // ConditionCode operands to match the mnemonic "as written" and then we let
4797  // the matcher deal with finding the right instruction or generating an
4798  // appropriate error.
4799  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4800  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4801
4802  // If we had a carry-set on an instruction that can't do that, issue an
4803  // error.
4804  if (!CanAcceptCarrySet && CarrySetting) {
4805    Parser.EatToEndOfStatement();
4806    return Error(NameLoc, "instruction '" + Mnemonic +
4807                 "' can not set flags, but 's' suffix specified");
4808  }
4809  // If we had a predication code on an instruction that can't do that, issue an
4810  // error.
4811  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4812    Parser.EatToEndOfStatement();
4813    return Error(NameLoc, "instruction '" + Mnemonic +
4814                 "' is not predicable, but condition code specified");
4815  }
4816
4817  // Add the carry setting operand, if necessary.
4818  if (CanAcceptCarrySet) {
4819    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4820    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4821                                               Loc));
4822  }
4823
4824  // Add the predication code operand, if necessary.
4825  if (CanAcceptPredicationCode) {
4826    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4827                                      CarrySetting);
4828    Operands.push_back(ARMOperand::CreateCondCode(
4829                         ARMCC::CondCodes(PredicationCode), Loc));
4830  }
4831
4832  // Add the processor imod operand, if necessary.
4833  if (ProcessorIMod) {
4834    Operands.push_back(ARMOperand::CreateImm(
4835          MCConstantExpr::Create(ProcessorIMod, getContext()),
4836                                 NameLoc, NameLoc));
4837  }
4838
4839  // Add the remaining tokens in the mnemonic.
4840  while (Next != StringRef::npos) {
4841    Start = Next;
4842    Next = Name.find('.', Start + 1);
4843    StringRef ExtraToken = Name.slice(Start, Next);
4844
4845    // Some NEON instructions have an optional datatype suffix that is
4846    // completely ignored. Check for that.
4847    if (isDataTypeToken(ExtraToken) &&
4848        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4849      continue;
4850
4851    if (ExtraToken != ".n") {
4852      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4853      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4854    }
4855  }
4856
4857  // Read the remaining operands.
4858  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4859    // Read the first operand.
4860    if (parseOperand(Operands, Mnemonic)) {
4861      Parser.EatToEndOfStatement();
4862      return true;
4863    }
4864
4865    while (getLexer().is(AsmToken::Comma)) {
4866      Parser.Lex();  // Eat the comma.
4867
4868      // Parse and remember the operand.
4869      if (parseOperand(Operands, Mnemonic)) {
4870        Parser.EatToEndOfStatement();
4871        return true;
4872      }
4873    }
4874  }
4875
4876  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4877    SMLoc Loc = getLexer().getLoc();
4878    Parser.EatToEndOfStatement();
4879    return Error(Loc, "unexpected token in argument list");
4880  }
4881
4882  Parser.Lex(); // Consume the EndOfStatement
4883
4884  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4885  // do and don't have a cc_out optional-def operand. With some spot-checks
4886  // of the operand list, we can figure out which variant we're trying to
4887  // parse and adjust accordingly before actually matching. We shouldn't ever
4888  // try to remove a cc_out operand that was explicitly set on the
4889  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4890  // table-driven matcher doesn't fit well with the ARM instruction set.
4891  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4892    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4893    Operands.erase(Operands.begin() + 1);
4894    delete Op;
4895  }
4896
4897  // ARM mode 'blx' needs special handling, as the register operand version
4898  // is predicable, but the label operand version is not. So, we can't rely
4899  // on the Mnemonic based checking to correctly figure out when to put
4900  // a k_CondCode operand in the list. If we're trying to match the label
4901  // version, remove the k_CondCode operand here.
4902  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4903      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4904    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4905    Operands.erase(Operands.begin() + 1);
4906    delete Op;
4907  }
4908
4909  // The vector-compare-to-zero instructions have a literal token "#0" at
4910  // the end that comes to here as an immediate operand. Convert it to a
4911  // token to play nicely with the matcher.
4912  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4913      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4914      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4915    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4916    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4917    if (CE && CE->getValue() == 0) {
4918      Operands.erase(Operands.begin() + 5);
4919      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4920      delete Op;
4921    }
4922  }
4923  // VCMP{E} does the same thing, but with a different operand count.
4924  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4925      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4926    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4927    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4928    if (CE && CE->getValue() == 0) {
4929      Operands.erase(Operands.begin() + 4);
4930      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4931      delete Op;
4932    }
4933  }
4934  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4935  // end. Convert it to a token here. Take care not to convert those
4936  // that should hit the Thumb2 encoding.
4937  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4938      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4939      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4940      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4941    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4942    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4943    if (CE && CE->getValue() == 0 &&
4944        (isThumbOne() ||
4945         // The cc_out operand matches the IT block.
4946         ((inITBlock() != CarrySetting) &&
4947         // Neither register operand is a high register.
4948         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4949          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4950      Operands.erase(Operands.begin() + 5);
4951      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4952      delete Op;
4953    }
4954  }
4955
4956  return false;
4957}
4958
4959// Validate context-sensitive operand constraints.
4960
4961// Return 'true' if the register list contains non-low GPR registers,
4962// 'false' otherwise. If Reg is found in the register list, set 'containsReg'
4963// to true. HiReg, when non-zero, is the one non-low register also allowed.
4964static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4965                                 unsigned HiReg, bool &containsReg) {
4966  containsReg = false;
4967  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4968    unsigned OpReg = Inst.getOperand(i).getReg();
4969    if (OpReg == Reg)
4970      containsReg = true;
4971    // Anything other than a low register isn't legal here.
4972    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4973      return true;
4974  }
4975  return false;
4976}
4977
4978// Check if the specified register is in the register list of the inst,
4979// starting at the indicated operand number.
4980static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4981  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4982    unsigned OpReg = Inst.getOperand(i).getReg();
4983    if (OpReg == Reg)
4984      return true;
4985  }
4986  return false;
4987}
4988
4989// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4990// the ARMInsts array) instead. Getting that here requires awkward
4991// API changes, though. Better way?
4992namespace llvm {
4993extern const MCInstrDesc ARMInsts[];
4994}
4995static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4996  return ARMInsts[Opcode];
4997}
4998
4999// FIXME: We would really like to be able to tablegen'erate this.
5000bool ARMAsmParser::
5001validateInstruction(MCInst &Inst,
5002                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5003  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5004  SMLoc Loc = Operands[0]->getStartLoc();
5005  // Check the IT block state first.
5006  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
5007  // being allowed in IT blocks, but not being predicable.  It just always
5008  // executes.
5009  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
5010    unsigned bit = 1;
5011    if (ITState.FirstCond)
5012      ITState.FirstCond = false;
5013    else
5014      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5015    // The instruction must be predicable.
5016    if (!MCID.isPredicable())
5017      return Error(Loc, "instructions in IT block must be predicable");
5018    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5019    unsigned ITCond = bit ? ITState.Cond :
5020      ARMCC::getOppositeCondition(ITState.Cond);
5021    if (Cond != ITCond) {
5022      // Find the condition code Operand to get its SMLoc information.
5023      SMLoc CondLoc;
5024      for (unsigned i = 1; i < Operands.size(); ++i)
5025        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5026          CondLoc = Operands[i]->getStartLoc();
5027      return Error(CondLoc, "incorrect condition in IT block; got '" +
5028                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5029                   "', but expected '" +
5030                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5031    }
5032  // Check for non-'al' condition codes outside of the IT block.
5033  } else if (isThumbTwo() && MCID.isPredicable() &&
5034             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5035             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5036             Inst.getOpcode() != ARM::t2B)
5037    return Error(Loc, "predicated instructions must be in IT block");
5038
5039  switch (Inst.getOpcode()) {
5040  case ARM::LDRD:
5041  case ARM::LDRD_PRE:
5042  case ARM::LDRD_POST:
5043  case ARM::LDREXD: {
5044    // Rt2 must be Rt + 1.
5045    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5046    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5047    if (Rt2 != Rt + 1)
5048      return Error(Operands[3]->getStartLoc(),
5049                   "destination operands must be sequential");
5050    return false;
5051  }
5052  case ARM::STRD: {
5053    // Rt2 must be Rt + 1.
5054    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5055    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5056    if (Rt2 != Rt + 1)
5057      return Error(Operands[3]->getStartLoc(),
5058                   "source operands must be sequential");
5059    return false;
5060  }
5061  case ARM::STRD_PRE:
5062  case ARM::STRD_POST:
5063  case ARM::STREXD: {
5064    // Rt2 must be Rt + 1.
5065    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5066    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5067    if (Rt2 != Rt + 1)
5068      return Error(Operands[3]->getStartLoc(),
5069                   "source operands must be sequential");
5070    return false;
5071  }
5072  case ARM::SBFX:
5073  case ARM::UBFX: {
5074    // width must be in range [1, 32-lsb]
5075    unsigned lsb = Inst.getOperand(2).getImm();
5076    unsigned widthm1 = Inst.getOperand(3).getImm();
5077    if (widthm1 >= 32 - lsb)
5078      return Error(Operands[5]->getStartLoc(),
5079                   "bitfield width must be in range [1,32-lsb]");
5080    return false;
5081  }
5082  case ARM::tLDMIA: {
5083    // If we're parsing Thumb2, the .w variant is available and handles
5084    // most cases that are normally illegal for a Thumb1 LDM
5085    // instruction. We'll make the transformation in processInstruction()
5086    // if necessary.
5087    //
5088    // Thumb LDM instructions are writeback iff the base register is not
5089    // in the register list.
5090    unsigned Rn = Inst.getOperand(0).getReg();
5091    bool hasWritebackToken =
5092      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5093       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5094    bool listContainsBase;
5095    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5096      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5097                   "registers must be in range r0-r7");
5098    // If we should have writeback, then there should be a '!' token.
5099    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5100      return Error(Operands[2]->getStartLoc(),
5101                   "writeback operator '!' expected");
5102    // If we should not have writeback, there must not be a '!'. This is
5103    // true even for the 32-bit wide encodings.
5104    if (listContainsBase && hasWritebackToken)
5105      return Error(Operands[3]->getStartLoc(),
5106                   "writeback operator '!' not allowed when base register "
5107                   "in register list");
5108
5109    break;
5110  }
5111  case ARM::t2LDMIA_UPD: {
5112    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5113      return Error(Operands[4]->getStartLoc(),
5114                   "writeback operator '!' not allowed when base register "
5115                   "in register list");
5116    break;
5117  }
5118  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5119  // so only issue a diagnostic for Thumb1. The instructions will be
5120  // switched to the t2 encodings in processInstruction() if necessary.
5121  case ARM::tPOP: {
5122    bool listContainsBase;
5123    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5124        !isThumbTwo())
5125      return Error(Operands[2]->getStartLoc(),
5126                   "registers must be in range r0-r7 or pc");
5127    break;
5128  }
5129  case ARM::tPUSH: {
5130    bool listContainsBase;
5131    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5132        !isThumbTwo())
5133      return Error(Operands[2]->getStartLoc(),
5134                   "registers must be in range r0-r7 or lr");
5135    break;
5136  }
5137  case ARM::tSTMIA_UPD: {
5138    bool listContainsBase;
5139    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5140      return Error(Operands[4]->getStartLoc(),
5141                   "registers must be in range r0-r7");
5142    break;
5143  }
5144  }
5145
5146  return false;
5147}
5148
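// Map a VST*LN assembly pseudo-instruction opcode onto the real instruction
// and set Spacing to the distance between consecutive D registers in the list
// (1 for the d forms, 2 for the double-spaced q forms).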
5149static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
5150  switch(Opc) {
5151  default: assert(0 && "unexpected opcode!");
5152  // VST1LN
5153  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
5154  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5155  case ARM::VST1LNdWB_fixed_Asm_U8:
5156    Spacing = 1;
5157    return ARM::VST1LNd8_UPD;
5158  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
5159  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
5160  case ARM::VST1LNdWB_fixed_Asm_U16:
5161    Spacing = 1;
5162    return ARM::VST1LNd16_UPD;
5163  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
5164  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5165  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
5166    Spacing = 1;
5167    return ARM::VST1LNd32_UPD;
5168  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
5169  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5170  case ARM::VST1LNdWB_register_Asm_U8:
5171    Spacing = 1;
5172    return ARM::VST1LNd8_UPD;
5173  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
5174  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
5175  case ARM::VST1LNdWB_register_Asm_U16:
5176    Spacing = 1;
5177    return ARM::VST1LNd16_UPD;
5178  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
5179  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5180  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
5181    Spacing = 1;
5182    return ARM::VST1LNd32_UPD;
5183  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
5184  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
5185  case ARM::VST1LNdAsm_U8:
5186    Spacing = 1;
5187    return ARM::VST1LNd8;
5188  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
5189  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5190  case ARM::VST1LNdAsm_U16:
5191    Spacing = 1;
5192    return ARM::VST1LNd16;
5193  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
5194  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
5195  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
5196    Spacing = 1;
5197    return ARM::VST1LNd32;
5198
5199  // VST2LN
5200  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
5201  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5202  case ARM::VST2LNdWB_fixed_Asm_U8:
5203    Spacing = 1;
5204    return ARM::VST2LNd8_UPD;
5205  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
5206  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
5207  case ARM::VST2LNdWB_fixed_Asm_U16:
5208    Spacing = 1;
5209    return ARM::VST2LNd16_UPD;
5210  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5211  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5212  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5213    Spacing = 1;
5214    return ARM::VST2LNd32_UPD;
5215  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5216  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5217  case ARM::VST2LNqWB_fixed_Asm_U16:
5218    Spacing = 2;
5219    return ARM::VST2LNq16_UPD;
5220  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
5221  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
5222  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
5223    Spacing = 2;
5224    return ARM::VST2LNq32_UPD;
5225
5226  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
5227  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5228  case ARM::VST2LNdWB_register_Asm_U8:
5229    Spacing = 1;
5230    return ARM::VST2LNd8_UPD;
5231  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
5232  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
5233  case ARM::VST2LNdWB_register_Asm_U16:
5234    Spacing = 1;
5235    return ARM::VST2LNd16_UPD;
5236  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5237  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5238  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5239    Spacing = 1;
5240    return ARM::VST2LNd32_UPD;
5241  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5242  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5243  case ARM::VST2LNqWB_register_Asm_U16:
5244    Spacing = 2;
5245    return ARM::VST2LNq16_UPD;
5246  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
5247  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
5248  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
5249    Spacing = 2;
5250    return ARM::VST2LNq32_UPD;
5251
5252  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
5253  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
5254  case ARM::VST2LNdAsm_U8:
5255    Spacing = 1;
5256    return ARM::VST2LNd8;
5257  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
5258  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5259  case ARM::VST2LNdAsm_U16:
5260    Spacing = 1;
5261    return ARM::VST2LNd16;
5262  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5263  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
5264  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
5265    Spacing = 1;
5266    return ARM::VST2LNd32;
5267  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5268  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
5269  case ARM::VST2LNqAsm_U16:
5270    Spacing = 2;
5271    return ARM::VST2LNq16;
5272  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
5273  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
5274  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
5275    Spacing = 2;
5276    return ARM::VST2LNq32;
5277  }
5278}
5279
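// As above, but for the VLD*LN assembly pseudo-instructions.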
5280static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
5281  switch(Opc) {
5282  default: assert(0 && "unexpected opcode!");
5283  // VLD1LN
5284  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5285  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5286  case ARM::VLD1LNdWB_fixed_Asm_U8:
5287    Spacing = 1;
5288    return ARM::VLD1LNd8_UPD;
5289  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5290  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5291  case ARM::VLD1LNdWB_fixed_Asm_U16:
5292    Spacing = 1;
5293    return ARM::VLD1LNd16_UPD;
5294  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5295  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5296  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5297    Spacing = 1;
5298    return ARM::VLD1LNd32_UPD;
5299  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5300  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5301  case ARM::VLD1LNdWB_register_Asm_U8:
5302    Spacing = 1;
5303    return ARM::VLD1LNd8_UPD;
5304  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5305  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5306  case ARM::VLD1LNdWB_register_Asm_U16:
5307    Spacing = 1;
5308    return ARM::VLD1LNd16_UPD;
5309  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5310  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5311  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5312    Spacing = 1;
5313    return ARM::VLD1LNd32_UPD;
5314  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5315  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5316  case ARM::VLD1LNdAsm_U8:
5317    Spacing = 1;
5318    return ARM::VLD1LNd8;
5319  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5320  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5321  case ARM::VLD1LNdAsm_U16:
5322    Spacing = 1;
5323    return ARM::VLD1LNd16;
5324  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5325  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5326  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5327    Spacing = 1;
5328    return ARM::VLD1LNd32;
5329
5330  // VLD2LN
5331  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5332  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5333  case ARM::VLD2LNdWB_fixed_Asm_U8:
5334    Spacing = 1;
5335    return ARM::VLD2LNd8_UPD;
5336  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5337  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5338  case ARM::VLD2LNdWB_fixed_Asm_U16:
5339    Spacing = 1;
5340    return ARM::VLD2LNd16_UPD;
5341  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5342  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5343  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5344    Spacing = 1;
5345    return ARM::VLD2LNd32_UPD;
5346  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5347  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5348  case ARM::VLD2LNqWB_fixed_Asm_U16:
5349    Spacing = 1;
5350    return ARM::VLD2LNq16_UPD;
5351  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
5352  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
5353  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
5354    Spacing = 2;
5355    return ARM::VLD2LNq32_UPD;
5356  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5357  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5358  case ARM::VLD2LNdWB_register_Asm_U8:
5359    Spacing = 1;
5360    return ARM::VLD2LNd8_UPD;
5361  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5362  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5363  case ARM::VLD2LNdWB_register_Asm_U16:
5364    Spacing = 1;
5365    return ARM::VLD2LNd16_UPD;
5366  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5367  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5368  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5369    Spacing = 1;
5370    return ARM::VLD2LNd32_UPD;
5371  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5372  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5373  case ARM::VLD2LNqWB_register_Asm_U16:
5374    Spacing = 2;
5375    return ARM::VLD2LNq16_UPD;
5376  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
5377  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
5378  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
5379    Spacing = 2;
5380    return ARM::VLD2LNq32_UPD;
5381  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5382  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5383  case ARM::VLD2LNdAsm_U8:
5384    Spacing = 1;
5385    return ARM::VLD2LNd8;
5386  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5387  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5388  case ARM::VLD2LNdAsm_U16:
5389    Spacing = 1;
5390    return ARM::VLD2LNd16;
5391  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5392  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5393  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5394    Spacing = 1;
5395    return ARM::VLD2LNd32;
5396  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5397  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
5398  case ARM::VLD2LNqAsm_U16:
5399    Spacing = 2;
5400    return ARM::VLD2LNq16;
5401  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
5402  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
5403  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
5404    Spacing = 2;
5405    return ARM::VLD2LNq32;
5406  }
5407}
5408
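// Rewrite certain parsed instructions (e.g. the NEON load/store-lane pseudos
// below) into the real MCInst form. Returns true if Inst was changed.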
5409bool ARMAsmParser::
5410processInstruction(MCInst &Inst,
5411                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5412  switch (Inst.getOpcode()) {
5413  // Handle NEON VST complex aliases.
5414  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5415  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5416  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5417  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5418  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5419  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5420  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5421  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5422    MCInst TmpInst;
5423    // Shuffle the operands around so the lane index operand is in the
5424    // right place.
5425    unsigned Spacing;
5426    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
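    // The writeback form ties the Rn_wb result to Rn, so the same parsed base
    // register operand is added for both.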
5427    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5428    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5429    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5430    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5431    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5432    TmpInst.addOperand(Inst.getOperand(1)); // lane
5433    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5434    TmpInst.addOperand(Inst.getOperand(6));
5435    Inst = TmpInst;
5436    return true;
5437  }
5438
5439  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
5440  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
5441  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
5442  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5443  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5444  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5445  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5446  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5447  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5448  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5449  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
5450  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
5451  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
5452  case ARM::VST2LNqWB_register_Asm_U32: {
5453    MCInst TmpInst;
5454    // Shuffle the operands around so the lane index operand is in the
5455    // right place.
5456    unsigned Spacing;
5457    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5458    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5459    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5460    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5461    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5462    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5463    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5464                                            Spacing));
5465    TmpInst.addOperand(Inst.getOperand(1)); // lane
5466    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5467    TmpInst.addOperand(Inst.getOperand(6));
5468    Inst = TmpInst;
5469    return true;
5470  }
5471  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5472  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5473  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5474  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5475  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5476  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5477  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5478  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5479    MCInst TmpInst;
5480    // Shuffle the operands around so the lane index operand is in the
5481    // right place.
5482    unsigned Spacing;
5483    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5484    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5485    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5486    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5487    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5488    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5489    TmpInst.addOperand(Inst.getOperand(1)); // lane
5490    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5491    TmpInst.addOperand(Inst.getOperand(5));
5492    Inst = TmpInst;
5493    return true;
5494  }
5495
5496  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
5497  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
5498  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
5499  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5500  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5501  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5502  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5503  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5504  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5505  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5506  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
5507  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
5508  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
5509  case ARM::VST2LNqWB_fixed_Asm_U32: {
5510    MCInst TmpInst;
5511    // Shuffle the operands around so the lane index operand is in the
5512    // right place.
5513    unsigned Spacing;
5514    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5515    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5516    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5517    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5518    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5519    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5520    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5521                                            Spacing));
5522    TmpInst.addOperand(Inst.getOperand(1)); // lane
5523    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5524    TmpInst.addOperand(Inst.getOperand(5));
5525    Inst = TmpInst;
5526    return true;
5527  }
5528  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5529  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5530  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5531  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5532  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5533  case ARM::VST1LNdAsm_U32: {
5534    MCInst TmpInst;
5535    // Shuffle the operands around so the lane index operand is in the
5536    // right place.
5537    unsigned Spacing;
5538    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5539    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5540    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5541    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5542    TmpInst.addOperand(Inst.getOperand(1)); // lane
5543    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5544    TmpInst.addOperand(Inst.getOperand(5));
5545    Inst = TmpInst;
5546    return true;
5547  }
5548
5549  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
5550  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
5551  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5552  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5553  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5554  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5555  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
5556  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
5557  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
5558    MCInst TmpInst;
5559    // Shuffle the operands around so the lane index operand is in the
5560    // right place.
5561    unsigned Spacing;
5562    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5563    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5564    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5565    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5566    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5567                                            Spacing));
5568    TmpInst.addOperand(Inst.getOperand(1)); // lane
5569    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5570    TmpInst.addOperand(Inst.getOperand(5));
5571    Inst = TmpInst;
5572    return true;
5573  }
5574  // Handle NEON VLD complex aliases.
5575  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5576  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5577  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5578  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5579  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5580  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5581  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5582  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5583    MCInst TmpInst;
5584    // Shuffle the operands around so the lane index operand is in the
5585    // right place.
5586    unsigned Spacing;
5587    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5588    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5589    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5590    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5591    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5592    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5593    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5594    TmpInst.addOperand(Inst.getOperand(1)); // lane
5595    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5596    TmpInst.addOperand(Inst.getOperand(6));
5597    Inst = TmpInst;
5598    return true;
5599  }
5600
5601  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
5602  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
5603  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
5604  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5605  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5606  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5607  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5608  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5609  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5610  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5611  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
5612  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
5613  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
5614  case ARM::VLD2LNqWB_register_Asm_U32: {
5615    MCInst TmpInst;
5616    // Shuffle the operands around so the lane index operand is in the
5617    // right place.
5618    unsigned Spacing;
5619    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5620    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5621    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5622                                            Spacing));
5623    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5624    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5625    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5626    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5627    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5628    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5629                                            Spacing));
5630    TmpInst.addOperand(Inst.getOperand(1)); // lane
5631    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5632    TmpInst.addOperand(Inst.getOperand(6));
5633    Inst = TmpInst;
5634    return true;
5635  }
5636
5637  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5638  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5639  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5640  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5641  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5642  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5643  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5644  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5645    MCInst TmpInst;
5646    // Shuffle the operands around so the lane index operand is in the
5647    // right place.
5648    unsigned Spacing;
5649    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5650    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5651    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5652    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5653    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5654    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5655    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5656    TmpInst.addOperand(Inst.getOperand(1)); // lane
5657    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5658    TmpInst.addOperand(Inst.getOperand(5));
5659    Inst = TmpInst;
5660    return true;
5661  }
5662
5663  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
5664  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
5665  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
5666  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5667  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5668  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5669  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5670  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5671  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5672  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5673  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
5674  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
5675  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
5676  case ARM::VLD2LNqWB_fixed_Asm_U32: {
5677    MCInst TmpInst;
5678    // Shuffle the operands around so the lane index operand is in the
5679    // right place.
5680    unsigned Spacing;
5681    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5682    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5683    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5684                                            Spacing));
5685    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5686    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5687    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5688    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5689    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5690    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5691                                            Spacing));
5692    TmpInst.addOperand(Inst.getOperand(1)); // lane
5693    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5694    TmpInst.addOperand(Inst.getOperand(5));
5695    Inst = TmpInst;
5696    return true;
5697  }
5698
5699  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
5700  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
5701  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5702  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5703  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5704  case ARM::VLD1LNdAsm_U32: {
5705    MCInst TmpInst;
5706    // Shuffle the operands around so the lane index operand is in the
5707    // right place.
5708    unsigned Spacing;
5709    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5710    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5711    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5712    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5713    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5714    TmpInst.addOperand(Inst.getOperand(1)); // lane
5715    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5716    TmpInst.addOperand(Inst.getOperand(5));
5717    Inst = TmpInst;
5718    return true;
5719  }
5720
5721  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
5722  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
5723  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5724  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5725  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5726  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5727  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
5728  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
5729  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
5730  case ARM::VLD2LNqAsm_U32: {
5731    MCInst TmpInst;
5732    // Shuffle the operands around so the lane index operand is in the
5733    // right place.
5734    unsigned Spacing;
5735    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5736    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5737    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5738                                            Spacing));
5739    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5740    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5741    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5742    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5743                                            Spacing));
5744    TmpInst.addOperand(Inst.getOperand(1)); // lane
5745    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5746    TmpInst.addOperand(Inst.getOperand(5));
5747    Inst = TmpInst;
5748    return true;
5749  }
5750  // Handle the Thumb2 mode MOV complex aliases.
5751  case ARM::t2MOVsr:
5752  case ARM::t2MOVSsr: {
5753    // If the register operands are low registers, which instruction we
5754    // expand to depends on the CCOut operand and on whether we're in
5755    // an IT block.
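    // (Illustrative sketch: with low registers and Rd == Rn, the
    // flag-setting "movs rd, rd, lsl rm" may only use the narrow 16-bit
    // shift encoding outside an IT block, while the non-flag-setting form
    // may only do so inside one.)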
5756    bool isNarrow = false;
5757    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5758        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5759        isARMLowRegister(Inst.getOperand(2).getReg()) &&
5760        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
5761        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
5762      isNarrow = true;
5763    MCInst TmpInst;
5764    unsigned newOpc;
5765    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
5766    default: llvm_unreachable("unexpected opcode!");
5767    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
5768    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
5769    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
5770    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
5771    }
5772    TmpInst.setOpcode(newOpc);
5773    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5774    if (isNarrow)
5775      TmpInst.addOperand(MCOperand::CreateReg(
5776          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5777    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5778    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5779    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5780    TmpInst.addOperand(Inst.getOperand(5));
5781    if (!isNarrow)
5782      TmpInst.addOperand(MCOperand::CreateReg(
5783          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
5784    Inst = TmpInst;
5785    return true;
5786  }
5787  case ARM::t2MOVsi:
5788  case ARM::t2MOVSsi: {
5789    // If the register operands are low registers, which instruction we
5790    // expand to depends on the CCOut operand and on whether we're in
5791    // an IT block.
5792    bool isNarrow = false;
5793    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5794        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5795        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5796      isNarrow = true;
5797    MCInst TmpInst;
5798    unsigned newOpc;
5799    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5800    default: llvm_unreachable("unexpected opcode!");
5801    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5802    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5803    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5804    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5805    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
5806    }
5807    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5808    if (Amount == 32) Amount = 0;
5809    TmpInst.setOpcode(newOpc);
5810    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5811    if (isNarrow)
5812      TmpInst.addOperand(MCOperand::CreateReg(
5813          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5814    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5815    if (newOpc != ARM::t2RRX)
5816      TmpInst.addOperand(MCOperand::CreateImm(Amount));
5817    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5818    TmpInst.addOperand(Inst.getOperand(4));
5819    if (!isNarrow)
5820      TmpInst.addOperand(MCOperand::CreateReg(
5821          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5822    Inst = TmpInst;
5823    return true;
5824  }
5825  // Handle the ARM mode MOV complex aliases.
5826  case ARM::ASRr:
5827  case ARM::LSRr:
5828  case ARM::LSLr:
5829  case ARM::RORr: {
5830    ARM_AM::ShiftOpc ShiftTy;
5831    switch(Inst.getOpcode()) {
5832    default: llvm_unreachable("unexpected opcode!");
5833    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5834    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5835    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5836    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5837    }
5838    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5839    MCInst TmpInst;
5840    TmpInst.setOpcode(ARM::MOVsr);
5841    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5842    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5843    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5844    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5845    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5846    TmpInst.addOperand(Inst.getOperand(4));
5847    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5848    Inst = TmpInst;
5849    return true;
5850  }
5851  case ARM::ASRi:
5852  case ARM::LSRi:
5853  case ARM::LSLi:
5854  case ARM::RORi: {
5855    ARM_AM::ShiftOpc ShiftTy;
5856    switch(Inst.getOpcode()) {
5857    default: llvm_unreachable("unexpected opcode!");
5858    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5859    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5860    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5861    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5862    }
5863    // A shift by zero is a plain MOVr, not a MOVsi.
5864    unsigned Amt = Inst.getOperand(2).getImm();
5865    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5866    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5867    MCInst TmpInst;
5868    TmpInst.setOpcode(Opc);
5869    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5870    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5871    if (Opc == ARM::MOVsi)
5872      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5873    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5874    TmpInst.addOperand(Inst.getOperand(4));
5875    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5876    Inst = TmpInst;
5877    return true;
5878  }
5879  case ARM::RRXi: {
5880    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5881    MCInst TmpInst;
5882    TmpInst.setOpcode(ARM::MOVsi);
5883    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5884    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5885    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5886    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5887    TmpInst.addOperand(Inst.getOperand(3));
5888    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5889    Inst = TmpInst;
5890    return true;
5891  }
5892  case ARM::t2LDMIA_UPD: {
5893    // If this is a load of a single register, then we should use
5894    // a post-indexed LDR instruction instead, per the ARM ARM.
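    // (Sketch: "ldmia r3!, {r4}" is emitted as "ldr r4, [r3], #4".)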
5895    if (Inst.getNumOperands() != 5)
5896      return false;
5897    MCInst TmpInst;
5898    TmpInst.setOpcode(ARM::t2LDR_POST);
5899    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5900    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5901    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5902    TmpInst.addOperand(MCOperand::CreateImm(4));
5903    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5904    TmpInst.addOperand(Inst.getOperand(3));
5905    Inst = TmpInst;
5906    return true;
5907  }
5908  case ARM::t2STMDB_UPD: {
5909    // If this is a store of a single register, then we should use
5910    // a pre-indexed STR instruction instead, per the ARM ARM.
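    // (Sketch: "stmdb r3!, {r4}" is emitted as "str r4, [r3, #-4]!".)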
5911    if (Inst.getNumOperands() != 5)
5912      return false;
5913    MCInst TmpInst;
5914    TmpInst.setOpcode(ARM::t2STR_PRE);
5915    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5916    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5917    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5918    TmpInst.addOperand(MCOperand::CreateImm(-4));
5919    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5920    TmpInst.addOperand(Inst.getOperand(3));
5921    Inst = TmpInst;
5922    return true;
5923  }
5924  case ARM::LDMIA_UPD:
5925    // If this is a load of a single register via a 'pop', then we should use
5926    // a post-indexed LDR instruction instead, per the ARM ARM.
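    // (Sketch: "pop {r4}" in ARM mode becomes "ldr r4, [sp], #4".)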
5927    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5928        Inst.getNumOperands() == 5) {
5929      MCInst TmpInst;
5930      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5931      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5932      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5933      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5934      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5935      TmpInst.addOperand(MCOperand::CreateImm(4));
5936      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5937      TmpInst.addOperand(Inst.getOperand(3));
5938      Inst = TmpInst;
5939      return true;
5940    }
5941    break;
5942  case ARM::STMDB_UPD:
5943    // If this is a store of a single register via a 'push', then we should use
5944    // a pre-indexed STR instruction instead, per the ARM ARM.
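    // (Sketch: "push {r4}" in ARM mode becomes "str r4, [sp, #-4]!".)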
5945    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5946        Inst.getNumOperands() == 5) {
5947      MCInst TmpInst;
5948      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5949      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5950      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5951      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5952      TmpInst.addOperand(MCOperand::CreateImm(-4));
5953      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5954      TmpInst.addOperand(Inst.getOperand(3));
5955      Inst = TmpInst;
5956    }
5957    break;
5958  case ARM::t2ADDri12:
5959    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5960    // mnemonic was used (not "addw"), encoding T3 is preferred.
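    // (Sketch: "add r0, r1, #256" switches to the T3 t2ADDri form here,
    // while "addw r0, r1, #256" keeps the T4 encoding the user asked for.)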
5961    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5962        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5963      break;
5964    Inst.setOpcode(ARM::t2ADDri);
5965    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5966    break;
5967  case ARM::t2SUBri12:
5968    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5969    // mnemonic was used (not "subw"), encoding T3 is preferred.
5970    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5971        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5972      break;
5973    Inst.setOpcode(ARM::t2SUBri);
5974    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5975    break;
5976  case ARM::tADDi8:
5977    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5978    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5979    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5980    // to encoding T1 if <Rd> is omitted."
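    // (Sketch: "adds r2, r2, #3" names Rd explicitly and prefers tADDi3,
    // whereas "adds r2, #3" keeps tADDi8.)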
5981    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5982      Inst.setOpcode(ARM::tADDi3);
5983      return true;
5984    }
5985    break;
5986  case ARM::tSUBi8:
5987    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5988    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5989    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5990    // to encoding T1 if <Rd> is omitted."
5991    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5992      Inst.setOpcode(ARM::tSUBi3);
5993      return true;
5994    }
5995    break;
5996  case ARM::t2ADDrr: {
5997    // If the destination and first source operand are the same, and
5998    // there's no setting of the flags, use encoding T2 instead of T3.
5999    // Note that this is only for ADD, not SUB. This mirrors the system
6000    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
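    // (Sketch: a plain "add r4, r4, r5" with no ".w" suffix is shrunk to
    // the 16-bit tADDhirr form below.)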
6001    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6002        Inst.getOperand(5).getReg() != 0 ||
6003        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6004         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6005      break;
6006    MCInst TmpInst;
6007    TmpInst.setOpcode(ARM::tADDhirr);
6008    TmpInst.addOperand(Inst.getOperand(0));
6009    TmpInst.addOperand(Inst.getOperand(0));
6010    TmpInst.addOperand(Inst.getOperand(2));
6011    TmpInst.addOperand(Inst.getOperand(3));
6012    TmpInst.addOperand(Inst.getOperand(4));
6013    Inst = TmpInst;
6014    return true;
6015  }
6016  case ARM::tB:
6017    // A Thumb conditional branch outside of an IT block is a tBcc.
6018    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6019      Inst.setOpcode(ARM::tBcc);
6020      return true;
6021    }
6022    break;
6023  case ARM::t2B:
6024    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6025    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6026      Inst.setOpcode(ARM::t2Bcc);
6027      return true;
6028    }
6029    break;
6030  case ARM::t2Bcc:
6031    // If the conditional is AL or we're in an IT block, we really want t2B.
6032    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6033      Inst.setOpcode(ARM::t2B);
6034      return true;
6035    }
6036    break;
6037  case ARM::tBcc:
6038    // If the conditional is AL, we really want tB.
6039    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6040      Inst.setOpcode(ARM::tB);
6041      return true;
6042    }
6043    break;
6044  case ARM::tLDMIA: {
6045    // If the register list contains any high registers, or if the writeback
6046    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6047    // instead if we're in Thumb2. Otherwise, this should have generated
6048    // an error in validateInstruction().
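    // (Sketch: "ldmia r0!, {r0, r1}" and "ldmia r0, {r1, r2}" both need the
    // 32-bit form, since the 16-bit tLDMIA writes back exactly when the
    // base register is not in the list.)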
6049    unsigned Rn = Inst.getOperand(0).getReg();
6050    bool hasWritebackToken =
6051      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6052       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6053    bool listContainsBase;
6054    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6055        (!listContainsBase && !hasWritebackToken) ||
6056        (listContainsBase && hasWritebackToken)) {
6057      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6058      assert (isThumbTwo());
6059      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6060      // If we're switching to the updating version, we need to insert
6061      // the writeback tied operand.
6062      if (hasWritebackToken)
6063        Inst.insert(Inst.begin(),
6064                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6065      return true;
6066    }
6067    break;
6068  }
6069  case ARM::tSTMIA_UPD: {
6070    // If the register list contains any high registers, we need to use
6071    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6072    // should have generated an error in validateInstruction().
6073    unsigned Rn = Inst.getOperand(0).getReg();
6074    bool listContainsBase;
6075    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6076      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6077      assert (isThumbTwo());
6078      Inst.setOpcode(ARM::t2STMIA_UPD);
6079      return true;
6080    }
6081    break;
6082  }
6083  case ARM::tPOP: {
6084    bool listContainsBase;
6085    // If the register list contains any high registers, we need to use
6086    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6087    // should have generated an error in validateInstruction().
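    // (Sketch: "pop {r8}" is widened here to t2LDMIA_UPD with SP as the
    // written-back base.)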
6088    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6089      return false;
6090    assert (isThumbTwo());
6091    Inst.setOpcode(ARM::t2LDMIA_UPD);
6092    // Add the base register and writeback operands.
6093    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6094    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6095    return true;
6096  }
6097  case ARM::tPUSH: {
6098    bool listContainsBase;
6099    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6100      return false;
6101    assert (isThumbTwo());
6102    Inst.setOpcode(ARM::t2STMDB_UPD);
6103    // Add the base register and writeback operands.
6104    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6105    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6106    return true;
6107  }
6108  case ARM::t2MOVi: {
6109    // If we can use the 16-bit encoding and the user didn't explicitly
6110    // request the 32-bit variant, transform it here.
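    // (Sketch: "movs r0, #42" outside an IT block, or a predicated
    // "moveq r0, #42" inside one, can use the 16-bit tMOVi8 encoding.)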
6111    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6112        Inst.getOperand(1).getImm() <= 255 &&
6113        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6114         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6115        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6116        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6117         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6118      // The operands aren't in the same order for tMOVi8...
6119      MCInst TmpInst;
6120      TmpInst.setOpcode(ARM::tMOVi8);
6121      TmpInst.addOperand(Inst.getOperand(0));
6122      TmpInst.addOperand(Inst.getOperand(4));
6123      TmpInst.addOperand(Inst.getOperand(1));
6124      TmpInst.addOperand(Inst.getOperand(2));
6125      TmpInst.addOperand(Inst.getOperand(3));
6126      Inst = TmpInst;
6127      return true;
6128    }
6129    break;
6130  }
6131  case ARM::t2MOVr: {
6132    // If we can use the 16-bit encoding and the user didn't explicitly
6133    // request the 32-bit variant, transform it here.
6134    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6135        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6136        Inst.getOperand(2).getImm() == ARMCC::AL &&
6137        Inst.getOperand(4).getReg() == ARM::CPSR &&
6138        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6139         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6140      // The operands aren't the same for tMOV[S]r... (no cc_out)
6141      MCInst TmpInst;
6142      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6143      TmpInst.addOperand(Inst.getOperand(0));
6144      TmpInst.addOperand(Inst.getOperand(1));
6145      TmpInst.addOperand(Inst.getOperand(2));
6146      TmpInst.addOperand(Inst.getOperand(3));
6147      Inst = TmpInst;
6148      return true;
6149    }
6150    break;
6151  }
6152  case ARM::t2SXTH:
6153  case ARM::t2SXTB:
6154  case ARM::t2UXTH:
6155  case ARM::t2UXTB: {
6156    // If we can use the 16-bit encoding and the user didn't explicitly
6157    // request the 32-bit variant, transform it here.
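    // (Sketch: "sxth r0, r1" with no rotation can shrink to the 16-bit
    // tSXTH encoding.)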
6158    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6159        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6160        Inst.getOperand(2).getImm() == 0 &&
6161        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6162         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6163      unsigned NewOpc;
6164      switch (Inst.getOpcode()) {
6165      default: llvm_unreachable("Illegal opcode!");
6166      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6167      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6168      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6169      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6170      }
6171      // The operands aren't the same for thumb1 (no rotate operand).
6172      MCInst TmpInst;
6173      TmpInst.setOpcode(NewOpc);
6174      TmpInst.addOperand(Inst.getOperand(0));
6175      TmpInst.addOperand(Inst.getOperand(1));
6176      TmpInst.addOperand(Inst.getOperand(3));
6177      TmpInst.addOperand(Inst.getOperand(4));
6178      Inst = TmpInst;
6179      return true;
6180    }
6181    break;
6182  }
6183  case ARM::MOVsi: {
6184    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6185    if (SOpc == ARM_AM::rrx) return false;
6186    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6187      // Shifting by zero is accepted as a vanilla 'MOVr'
6188      MCInst TmpInst;
6189      TmpInst.setOpcode(ARM::MOVr);
6190      TmpInst.addOperand(Inst.getOperand(0));
6191      TmpInst.addOperand(Inst.getOperand(1));
6192      TmpInst.addOperand(Inst.getOperand(3));
6193      TmpInst.addOperand(Inst.getOperand(4));
6194      TmpInst.addOperand(Inst.getOperand(5));
6195      Inst = TmpInst;
6196      return true;
6197    }
6198    return false;
6199  }
6200  case ARM::t2IT: {
6201    // The mask bits for all but the first condition are represented as
6202    // In the encoding, the mask bits for all but the first condition are
6203    // interpreted relative to the low bit of the condition code: a mask
6204    // bit equal to that low bit means 't'. While parsing we always use 1
6205    // to mean 't', so XOR-toggle the bits if the low bit of the condition
6206    // code is zero. The encoding also expects the low bit of the condition
6207    // to appear as bit 4 of the mask operand, so mask that in if needed.
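    // (Worked sketch, assuming the parser builds the mask with bit 3 for
    // the second instruction and 1 meaning 't': "itet ge" arrives as mask
    // 0b0110 with GE == 0b1010, so the low bit is 0 and bits 3..2 get
    // toggled, giving 0b1010; for a condition with an odd encoding the
    // mask is instead OR'd with 0x10.)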
6208    unsigned Mask = MO.getImm();
6209    unsigned OrigMask = Mask;
6210    unsigned TZ = CountTrailingZeros_32(Mask);
6211    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6212      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6213      for (unsigned i = 3; i != TZ; --i)
6214        Mask ^= 1 << i;
6215    } else
6216      Mask |= 0x10;
6217    MO.setImm(Mask);
6218
6219    // Set up the IT block state according to the IT instruction we just
6220    // matched.
6221    assert(!inITBlock() && "nested IT blocks?!");
6222    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6223    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6224    ITState.CurPosition = 0;
6225    ITState.FirstCond = true;
6226    break;
6227  }
6228  }
6229  return false;
6230}
6231
6232unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6233  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6234  // suffix depending on whether they're in an IT block or not.
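  // (Sketch: in Thumb2 the flag-setting 16-bit form is rejected inside an
  // IT block, and the non-flag-setting form is rejected outside one, as
  // the checks below report.)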
6235  unsigned Opc = Inst.getOpcode();
6236  const MCInstrDesc &MCID = getInstDesc(Opc);
6237  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6238    assert(MCID.hasOptionalDef() &&
6239           "optionally flag setting instruction missing optional def operand");
6240    assert(MCID.NumOperands == Inst.getNumOperands() &&
6241           "operand count mismatch!");
6242    // Find the optional-def operand (cc_out).
6243    unsigned OpNo;
6244    for (OpNo = 0;
6245         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
6246         ++OpNo)
6247      ;
6248    // If we're parsing Thumb1, reject it completely.
6249    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6250      return Match_MnemonicFail;
6251    // If we're parsing Thumb2, which form is legal depends on whether we're
6252    // in an IT block.
6253    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6254        !inITBlock())
6255      return Match_RequiresITBlock;
6256    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6257        inITBlock())
6258      return Match_RequiresNotITBlock;
6259  }
6260  // Some high-register supporting Thumb1 encodings only allow both registers
6261  // to be from r0-r7 when in Thumb2.
6262  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6263           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6264           isARMLowRegister(Inst.getOperand(2).getReg()))
6265    return Match_RequiresThumb2;
6266  // Others only require ARMv6 or later.
6267  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6268           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6269           isARMLowRegister(Inst.getOperand(1).getReg()))
6270    return Match_RequiresV6;
6271  return Match_Success;
6272}
6273
6274bool ARMAsmParser::
6275MatchAndEmitInstruction(SMLoc IDLoc,
6276                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
6277                        MCStreamer &Out) {
6278  MCInst Inst;
6279  unsigned ErrorInfo;
6280  unsigned MatchResult;
6281  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
6282  switch (MatchResult) {
6283  default: break;
6284  case Match_Success:
6285    // Context sensitive operand constraints aren't handled by the matcher,
6286    // so check them here.
6287    if (validateInstruction(Inst, Operands)) {
6288      // Still progress the IT block, otherwise one wrong condition causes
6289      // nasty cascading errors.
6290      forwardITPosition();
6291      return true;
6292    }
6293
6294    // Some instructions need post-processing to, for example, tweak which
6295    // encoding is selected. Loop on it while changes happen so the
6296    // individual transformations can chain off each other. E.g.,
6297    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
6298    while (processInstruction(Inst, Operands))
6299      ;
6300
6301    // Only move forward at the very end so that everything in validate
6302    // and process gets a consistent answer about whether we're in an IT
6303    // block.
6304    forwardITPosition();
6305
6306    Out.EmitInstruction(Inst);
6307    return false;
6308  case Match_MissingFeature:
6309    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
6310    return true;
6311  case Match_InvalidOperand: {
6312    SMLoc ErrorLoc = IDLoc;
6313    if (ErrorInfo != ~0U) {
6314      if (ErrorInfo >= Operands.size())
6315        return Error(IDLoc, "too few operands for instruction");
6316
6317      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
6318      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6319    }
6320
6321    return Error(ErrorLoc, "invalid operand for instruction");
6322  }
6323  case Match_MnemonicFail:
6324    return Error(IDLoc, "invalid instruction");
6325  case Match_ConversionFail:
6326    // The converter function will have already emitted a diagnostic.
6327    return true;
6328  case Match_RequiresNotITBlock:
6329    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6330  case Match_RequiresITBlock:
6331    return Error(IDLoc, "instruction only valid inside IT block");
6332  case Match_RequiresV6:
6333    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6334  case Match_RequiresThumb2:
6335    return Error(IDLoc, "instruction variant requires Thumb2");
6336  }
6337
6338  llvm_unreachable("Implement any new match types added!");
6339  return true;
6340}
6341
6342/// parseDirective parses the ARM-specific directives
6343bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6344  StringRef IDVal = DirectiveID.getIdentifier();
6345  if (IDVal == ".word")
6346    return parseDirectiveWord(4, DirectiveID.getLoc());
6347  else if (IDVal == ".thumb")
6348    return parseDirectiveThumb(DirectiveID.getLoc());
6349  else if (IDVal == ".arm")
6350    return parseDirectiveARM(DirectiveID.getLoc());
6351  else if (IDVal == ".thumb_func")
6352    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6353  else if (IDVal == ".code")
6354    return parseDirectiveCode(DirectiveID.getLoc());
6355  else if (IDVal == ".syntax")
6356    return parseDirectiveSyntax(DirectiveID.getLoc());
6357  else if (IDVal == ".unreq")
6358    return parseDirectiveUnreq(DirectiveID.getLoc());
6359  else if (IDVal == ".arch")
6360    return parseDirectiveArch(DirectiveID.getLoc());
6361  else if (IDVal == ".eabi_attribute")
6362    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6363  return true;
6364}
6365
6366/// parseDirectiveWord
6367///  ::= .word [ expression (, expression)* ]
6368bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6369  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6370    for (;;) {
6371      const MCExpr *Value;
6372      if (getParser().ParseExpression(Value))
6373        return true;
6374
6375      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6376
6377      if (getLexer().is(AsmToken::EndOfStatement))
6378        break;
6379
6380      // FIXME: Improve diagnostic.
6381      if (getLexer().isNot(AsmToken::Comma))
6382        return Error(L, "unexpected token in directive");
6383      Parser.Lex();
6384    }
6385  }
6386
6387  Parser.Lex();
6388  return false;
6389}
6390
6391/// parseDirectiveThumb
6392///  ::= .thumb
6393bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6394  if (getLexer().isNot(AsmToken::EndOfStatement))
6395    return Error(L, "unexpected token in directive");
6396  Parser.Lex();
6397
6398  if (!isThumb())
6399    SwitchMode();
6400  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6401  return false;
6402}
6403
6404/// parseDirectiveARM
6405///  ::= .arm
6406bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6407  if (getLexer().isNot(AsmToken::EndOfStatement))
6408    return Error(L, "unexpected token in directive");
6409  Parser.Lex();
6410
6411  if (isThumb())
6412    SwitchMode();
6413  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6414  return false;
6415}
6416
6417/// parseDirectiveThumbFunc
6418///  ::= .thumb_func symbol_name
6419bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6420  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6421  bool isMachO = MAI.hasSubsectionsViaSymbols();
6422  StringRef Name;
6423  bool needFuncName = true;
6424
6425  // Darwin asm has an optional function name after the .thumb_func
6426  // directive; ELF doesn't.
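  // (Sketch: on Darwin ".thumb_func _foo" names the symbol directly; for
  // ELF the name is taken from the label that follows the directive.)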
6427  if (isMachO) {
6428    const AsmToken &Tok = Parser.getTok();
6429    if (Tok.isNot(AsmToken::EndOfStatement)) {
6430      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6431        return Error(L, "unexpected token in .thumb_func directive");
6432      Name = Tok.getIdentifier();
6433      Parser.Lex(); // Consume the identifier token.
6434      needFuncName = false;
6435    }
6436  }
6437
6438  if (getLexer().isNot(AsmToken::EndOfStatement))
6439    return Error(L, "unexpected token in directive");
6440
6441  // Eat the end of statement and any blank lines that follow.
6442  while (getLexer().is(AsmToken::EndOfStatement))
6443    Parser.Lex();
6444
6445  // FIXME: assuming function name will be the line following .thumb_func
6446  // We really should be checking the next symbol definition even if there's
6447  // stuff in between.
6448  if (needFuncName) {
6449    Name = Parser.getTok().getIdentifier();
6450  }
6451
6452  // Mark symbol as a thumb symbol.
6453  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6454  getParser().getStreamer().EmitThumbFunc(Func);
6455  return false;
6456}
6457
6458/// parseDirectiveSyntax
6459///  ::= .syntax unified | divided
6460bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6461  const AsmToken &Tok = Parser.getTok();
6462  if (Tok.isNot(AsmToken::Identifier))
6463    return Error(L, "unexpected token in .syntax directive");
6464  StringRef Mode = Tok.getString();
6465  if (Mode == "unified" || Mode == "UNIFIED")
6466    Parser.Lex();
6467  else if (Mode == "divided" || Mode == "DIVIDED")
6468    return Error(L, "'.syntax divided' arm assembly not supported");
6469  else
6470    return Error(L, "unrecognized syntax mode in .syntax directive");
6471
6472  if (getLexer().isNot(AsmToken::EndOfStatement))
6473    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6474  Parser.Lex();
6475
6476  // TODO tell the MC streamer the mode
6477  // getParser().getStreamer().Emit???();
6478  return false;
6479}
6480
6481/// parseDirectiveCode
6482///  ::= .code 16 | 32
6483bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6484  const AsmToken &Tok = Parser.getTok();
6485  if (Tok.isNot(AsmToken::Integer))
6486    return Error(L, "unexpected token in .code directive");
6487  int64_t Val = Parser.getTok().getIntVal();
6488  if (Val == 16)
6489    Parser.Lex();
6490  else if (Val == 32)
6491    Parser.Lex();
6492  else
6493    return Error(L, "invalid operand to .code directive");
6494
6495  if (getLexer().isNot(AsmToken::EndOfStatement))
6496    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6497  Parser.Lex();
6498
6499  if (Val == 16) {
6500    if (!isThumb())
6501      SwitchMode();
6502    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6503  } else {
6504    if (isThumb())
6505      SwitchMode();
6506    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6507  }
6508
6509  return false;
6510}
6511
6512/// parseDirectiveReq
6513///  ::= name .req registername
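/// e.g. "counter .req r4" lets "counter" be used wherever r4 is accepted.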
6514bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6515  Parser.Lex(); // Eat the '.req' token.
6516  unsigned Reg;
6517  SMLoc SRegLoc, ERegLoc;
6518  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6519    Parser.EatToEndOfStatement();
6520    return Error(SRegLoc, "register name expected");
6521  }
6522
6523  // Shouldn't be anything else.
6524  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6525    Parser.EatToEndOfStatement();
6526    return Error(Parser.getTok().getLoc(),
6527                 "unexpected input in .req directive.");
6528  }
6529
6530  Parser.Lex(); // Consume the EndOfStatement
6531
6532  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6533    return Error(SRegLoc, "redefinition of '" + Name +
6534                          "' does not match original.");
6535
6536  return false;
6537}
6538
6539/// parseDirectiveUnreq
6540///  ::= .unreq registername
6541bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6542  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6543    Parser.EatToEndOfStatement();
6544    return Error(L, "unexpected input in .unreq directive.");
6545  }
6546  RegisterReqs.erase(Parser.getTok().getIdentifier());
6547  Parser.Lex(); // Eat the identifier.
6548  return false;
6549}
6550
6551/// parseDirectiveArch
6552///  ::= .arch token
6553bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
6554  return true;
6555}
6556
6557/// parseDirectiveEabiAttr
6558///  ::= .eabi_attribute int, int
6559bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
6560  return true;
6561}
6562
6563extern "C" void LLVMInitializeARMAsmLexer();
6564
6565/// Force static initialization.
6566extern "C" void LLVMInitializeARMAsmParser() {
6567  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6568  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6569  LLVMInitializeARMAsmLexer();
6570}
6571
6572#define GET_REGISTER_MATCHER
6573#define GET_MATCHER_IMPLEMENTATION
6574#include "ARMGenAsmMatcher.inc"
6575