ARMAsmParser.cpp revision aee718beac4fada5914d773db38002d95cae5e0d
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
/// ARMAsmParser - Target assembly parser for ARM/Thumb. Implements the
/// MCTargetAsmParser interface (register/instruction/directive parsing) and
/// hosts the tablegen-generated matcher declared via ARMGenAsmMatcher.inc.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // Map of register aliases registered via the .req directive.
  StringMap<unsigned> RegisterReqs;

  // State for the IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
  // True while we are inside an active IT block (CurPosition is live).
  bool inITBlock() { return ITState.CurPosition != ~0U;}
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    // Per the ITState.Mask comment above, the block holds 4 - TZ conditional
    // instructions; once CurPosition advances past the last slot the block
    // is finished.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostics are forwarded to the generic parser.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Register, operand and memory-operand parsing helpers.
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  // Assembler directive handlers (.word, .thumb, .req, ...).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);
  bool parseDirectiveArch(SMLoc L);
  bool parseDirectiveEabiAttr(SMLoc L);

  // Split a full mnemonic into the base mnemonic plus predication code,
  // carry-setting suffix, processor IMod and IT mask, as applicable.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  // Toggle between ARM and Thumb mode and recompute the feature set.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers referenced by the generated matcher.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match validation and canonicalization of a built MCInst.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match results, numbered after the generic ones.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union below: exactly one union member is live,
  // selected by Kind.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  // Source range of the operand's tokens.
  SMLoc StartLoc, EndLoc;
  // Storage for the k_*RegisterList kinds; kept outside the union because
  // SmallVector is not a POD type.
  SmallVector<unsigned, 8> Registers;

  union {
    // k_CondCode: predication condition.
    struct {
      ARMCC::CondCodes Val;
    } CC;

    // k_CoprocNum / k_CoprocReg: coprocessor number or register.
    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    // k_ITCondMask: 4-bit then/else mask of an IT instruction.
    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    // k_MSRMask: mask operand for MSR.
    struct {
      unsigned Val;
    } MMask;

    // k_Token: non-owning pointer/length pair into the source buffer.
    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    // k_Register / k_CCOut.
    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
      unsigned LaneIndex;
      bool isDoubleSpaced;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    // k_Immediate: arbitrary (possibly symbolic) expression.
    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;       // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;        // shift for OffsetReg.
      unsigned Alignment;       // 0 = no alignment specified
                                // n = alignment in bytes (2, 4, 8, 16, or 32)
      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
    } Memory;

    // k_PostIndexRegister: post-indexed register offset, optionally shifted.
    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    // k_ShifterImmediate: lsl/asr immediate shift applied to an operand.
    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    // k_ShiftedRegister: register shifted by a register amount.
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    // k_ShiftedImmediate: register shifted by an immediate amount.
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    // k_BitfieldDescriptor: lsb position and width for bfi/bfc-style ops.
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };
400
401  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
402public:
403  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
404    Kind = o.Kind;
405    StartLoc = o.StartLoc;
406    EndLoc = o.EndLoc;
407    switch (Kind) {
408    case k_CondCode:
409      CC = o.CC;
410      break;
411    case k_ITCondMask:
412      ITMask = o.ITMask;
413      break;
414    case k_Token:
415      Tok = o.Tok;
416      break;
417    case k_CCOut:
418    case k_Register:
419      Reg = o.Reg;
420      break;
421    case k_RegisterList:
422    case k_DPRRegisterList:
423    case k_SPRRegisterList:
424      Registers = o.Registers;
425      break;
426    case k_VectorList:
427    case k_VectorListAllLanes:
428    case k_VectorListIndexed:
429      VectorList = o.VectorList;
430      break;
431    case k_CoprocNum:
432    case k_CoprocReg:
433      Cop = o.Cop;
434      break;
435    case k_CoprocOption:
436      CoprocOption = o.CoprocOption;
437      break;
438    case k_Immediate:
439      Imm = o.Imm;
440      break;
441    case k_FPImmediate:
442      FPImm = o.FPImm;
443      break;
444    case k_MemBarrierOpt:
445      MBOpt = o.MBOpt;
446      break;
447    case k_Memory:
448      Memory = o.Memory;
449      break;
450    case k_PostIndexRegister:
451      PostIdxReg = o.PostIdxReg;
452      break;
453    case k_MSRMask:
454      MMask = o.MMask;
455      break;
456    case k_ProcIFlags:
457      IFlags = o.IFlags;
458      break;
459    case k_ShifterImmediate:
460      ShifterImm = o.ShifterImm;
461      break;
462    case k_ShiftedRegister:
463      RegShiftedReg = o.RegShiftedReg;
464      break;
465    case k_ShiftedImmediate:
466      RegShiftedImm = o.RegShiftedImm;
467      break;
468    case k_RotateImmediate:
469      RotImm = o.RotImm;
470      break;
471    case k_BitfieldDescriptor:
472      Bitfield = o.Bitfield;
473      break;
474    case k_VectorIndex:
475      VectorIndex = o.VectorIndex;
476      break;
477    }
478  }
479
  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  // Kind-checked accessors: each asserts that the operand is of the
  // expected kind before reading the corresponding union member.
  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }
540
  // Simple kind predicates, used by the auto-generated matcher to classify
  // operands. Note isITCondCode is an alias for the k_CondCode check.
  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
550  bool isImm8s4() const {
551    if (Kind != k_Immediate)
552      return false;
553    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
554    if (!CE) return false;
555    int64_t Value = CE->getValue();
556    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
557  }
558  bool isImm0_1020s4() const {
559    if (Kind != k_Immediate)
560      return false;
561    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
562    if (!CE) return false;
563    int64_t Value = CE->getValue();
564    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
565  }
566  bool isImm0_508s4() const {
567    if (Kind != k_Immediate)
568      return false;
569    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
570    if (!CE) return false;
571    int64_t Value = CE->getValue();
572    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
573  }
574  bool isImm0_255() const {
575    if (Kind != k_Immediate)
576      return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return Value >= 0 && Value < 256;
581  }
582  bool isImm0_1() const {
583    if (Kind != k_Immediate)
584      return false;
585    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
586    if (!CE) return false;
587    int64_t Value = CE->getValue();
588    return Value >= 0 && Value < 2;
589  }
590  bool isImm0_3() const {
591    if (Kind != k_Immediate)
592      return false;
593    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
594    if (!CE) return false;
595    int64_t Value = CE->getValue();
596    return Value >= 0 && Value < 4;
597  }
598  bool isImm0_7() const {
599    if (Kind != k_Immediate)
600      return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 8;
605  }
606  bool isImm0_15() const {
607    if (Kind != k_Immediate)
608      return false;
609    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
610    if (!CE) return false;
611    int64_t Value = CE->getValue();
612    return Value >= 0 && Value < 16;
613  }
614  bool isImm0_31() const {
615    if (Kind != k_Immediate)
616      return false;
617    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
618    if (!CE) return false;
619    int64_t Value = CE->getValue();
620    return Value >= 0 && Value < 32;
621  }
622  bool isImm0_63() const {
623    if (Kind != k_Immediate)
624      return false;
625    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
626    if (!CE) return false;
627    int64_t Value = CE->getValue();
628    return Value >= 0 && Value < 64;
629  }
630  bool isImm8() const {
631    if (Kind != k_Immediate)
632      return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 8;
637  }
638  bool isImm16() const {
639    if (Kind != k_Immediate)
640      return false;
641    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
642    if (!CE) return false;
643    int64_t Value = CE->getValue();
644    return Value == 16;
645  }
646  bool isImm32() const {
647    if (Kind != k_Immediate)
648      return false;
649    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
650    if (!CE) return false;
651    int64_t Value = CE->getValue();
652    return Value == 32;
653  }
654  bool isShrImm8() const {
655    if (Kind != k_Immediate)
656      return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 8;
661  }
662  bool isShrImm16() const {
663    if (Kind != k_Immediate)
664      return false;
665    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
666    if (!CE) return false;
667    int64_t Value = CE->getValue();
668    return Value > 0 && Value <= 16;
669  }
670  bool isShrImm32() const {
671    if (Kind != k_Immediate)
672      return false;
673    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
674    if (!CE) return false;
675    int64_t Value = CE->getValue();
676    return Value > 0 && Value <= 32;
677  }
678  bool isShrImm64() const {
679    if (Kind != k_Immediate)
680      return false;
681    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
682    if (!CE) return false;
683    int64_t Value = CE->getValue();
684    return Value > 0 && Value <= 64;
685  }
686  bool isImm1_7() const {
687    if (Kind != k_Immediate)
688      return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value > 0 && Value < 8;
693  }
694  bool isImm1_15() const {
695    if (Kind != k_Immediate)
696      return false;
697    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
698    if (!CE) return false;
699    int64_t Value = CE->getValue();
700    return Value > 0 && Value < 16;
701  }
702  bool isImm1_31() const {
703    if (Kind != k_Immediate)
704      return false;
705    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
706    if (!CE) return false;
707    int64_t Value = CE->getValue();
708    return Value > 0 && Value < 32;
709  }
710  bool isImm1_16() const {
711    if (Kind != k_Immediate)
712      return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value > 0 && Value < 17;
717  }
718  bool isImm1_32() const {
719    if (Kind != k_Immediate)
720      return false;
721    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
722    if (!CE) return false;
723    int64_t Value = CE->getValue();
724    return Value > 0 && Value < 33;
725  }
726  bool isImm0_32() const {
727    if (Kind != k_Immediate)
728      return false;
729    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
730    if (!CE) return false;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value < 33;
733  }
734  bool isImm0_65535() const {
735    if (Kind != k_Immediate)
736      return false;
737    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
738    if (!CE) return false;
739    int64_t Value = CE->getValue();
740    return Value >= 0 && Value < 65536;
741  }
742  bool isImm0_65535Expr() const {
743    if (Kind != k_Immediate)
744      return false;
745    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
746    // If it's not a constant expression, it'll generate a fixup and be
747    // handled later.
748    if (!CE) return true;
749    int64_t Value = CE->getValue();
750    return Value >= 0 && Value < 65536;
751  }
752  bool isImm24bit() const {
753    if (Kind != k_Immediate)
754      return false;
755    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
756    if (!CE) return false;
757    int64_t Value = CE->getValue();
758    return Value >= 0 && Value <= 0xffffff;
759  }
760  bool isImmThumbSR() const {
761    if (Kind != k_Immediate)
762      return false;
763    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764    if (!CE) return false;
765    int64_t Value = CE->getValue();
766    return Value > 0 && Value < 33;
767  }
768  bool isPKHLSLImm() const {
769    if (Kind != k_Immediate)
770      return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return Value >= 0 && Value < 32;
775  }
776  bool isPKHASRImm() const {
777    if (Kind != k_Immediate)
778      return false;
779    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
780    if (!CE) return false;
781    int64_t Value = CE->getValue();
782    return Value > 0 && Value <= 32;
783  }
784  bool isARMSOImm() const {
785    if (Kind != k_Immediate)
786      return false;
787    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
788    if (!CE) return false;
789    int64_t Value = CE->getValue();
790    return ARM_AM::getSOImmVal(Value) != -1;
791  }
792  bool isARMSOImmNot() const {
793    if (Kind != k_Immediate)
794      return false;
795    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
796    if (!CE) return false;
797    int64_t Value = CE->getValue();
798    return ARM_AM::getSOImmVal(~Value) != -1;
799  }
800  bool isARMSOImmNeg() const {
801    if (Kind != k_Immediate)
802      return false;
803    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
804    if (!CE) return false;
805    int64_t Value = CE->getValue();
806    return ARM_AM::getSOImmVal(-Value) != -1;
807  }
808  bool isT2SOImm() const {
809    if (Kind != k_Immediate)
810      return false;
811    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
812    if (!CE) return false;
813    int64_t Value = CE->getValue();
814    return ARM_AM::getT2SOImmVal(Value) != -1;
815  }
816  bool isT2SOImmNot() const {
817    if (Kind != k_Immediate)
818      return false;
819    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
820    if (!CE) return false;
821    int64_t Value = CE->getValue();
822    return ARM_AM::getT2SOImmVal(~Value) != -1;
823  }
824  bool isT2SOImmNeg() const {
825    if (Kind != k_Immediate)
826      return false;
827    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828    if (!CE) return false;
829    int64_t Value = CE->getValue();
830    return ARM_AM::getT2SOImmVal(-Value) != -1;
831  }
832  bool isSetEndImm() const {
833    if (Kind != k_Immediate)
834      return false;
835    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
836    if (!CE) return false;
837    int64_t Value = CE->getValue();
838    return Value == 1 || Value == 0;
839  }
  // More kind predicates for register, list, token and shift operands.
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  // Post-indexed register with no shift applied (stricter than the above).
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
  }
  // isMemNoOffset - Memory operand with neither a register nor an immediate
  // offset. When alignOK is true, an explicit alignment specifier is also
  // permitted.
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  // Offset-free memory operand that may carry an alignment specifier.
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
866  bool isAddrMode2() const {
867    if (!isMemory() || Memory.Alignment != 0) return false;
868    // Check for register offset.
869    if (Memory.OffsetRegNum) return true;
870    // Immediate offset in range [-4095, 4095].
871    if (!Memory.OffsetImm) return true;
872    int64_t Val = Memory.OffsetImm->getValue();
873    return Val > -4096 && Val < 4096;
874  }
875  bool isAM2OffsetImm() const {
876    if (Kind != k_Immediate)
877      return false;
878    // Immediate offset in range [-4095, 4095].
879    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
880    if (!CE) return false;
881    int64_t Val = CE->getValue();
882    return Val > -4096 && Val < 4096;
883  }
  // ARM addrmode3 memory operand: unshifted register offset or immediate
  // offset in [-255, 255]. A non-constant immediate is accepted as a label
  // reference to be resolved by a fixup.
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  // Post-indexed addrmode3 offset: either an unshifted register or a
  // constant in [-255, 255], with INT32_MIN standing in for #-0.
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  // VFP addrmode5 memory operand: immediate-only offset, a multiple of 4
  // in [-1020, 1020] (INT32_MIN encodes #-0). Register offsets are
  // rejected. A non-constant immediate is accepted as a label reference.
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
  // TBB (table branch byte) memory operand: [Rn, Rm] with no shift,
  // negation, or alignment qualifier.
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  // TBH (table branch halfword) memory operand: [Rn, Rm, lsl #1] with no
  // negation or alignment qualifier.
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0 )
      return false;
    return true;
  }
  // Any register-offset memory operand without an alignment qualifier.
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
945  bool isT2MemRegOffset() const {
946    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
947        Memory.Alignment != 0)
948      return false;
949    // Only lsl #{0, 1, 2, 3} allowed.
950    if (Memory.ShiftType == ARM_AM::no_shift)
951      return true;
952    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
953      return false;
954    return true;
955  }
956  bool isMemThumbRR() const {
957    // Thumb reg+reg addressing is simple. Just two registers, a base and
958    // an offset. No shifts, negations or any other complicating factors.
959    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
960        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
961      return false;
962    return isARMLowRegister(Memory.BaseRegNum) &&
963      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
964  }
  // Thumb [Rn, #imm] with a low base register and an immediate that is a
  // multiple of 4 in [0, 124] (word-scaled offset).
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  // Thumb [Rn, #imm] with a low base register and an immediate that is a
  // multiple of 2 in [0, 62] (halfword-scaled offset).
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  // Thumb [Rn, #imm] with a low base register and a byte offset in [0, 31].
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  // Thumb [sp, #imm] with an immediate that is a multiple of 4 in
  // [0, 1020].
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  // [Rn, #imm] with the immediate a multiple of 4 in [-1020, 1020]; a
  // non-constant immediate is accepted as a label reference.
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  // [Rn, #imm] with the immediate a multiple of 4 in [0, 1020].
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  // [Rn, #imm] with the immediate in [-255, 255]; INT32_MIN encodes #-0.
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  // [Rn, #imm] with a non-negative immediate in [0, 255].
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  // [Rn, #-imm] with a strictly negative immediate in [-255, -1];
  // INT32_MIN encodes #-0. A missing immediate (offset 0) is rejected.
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  // [Rn, #imm] with an unsigned immediate in [0, 4095].
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  // [Rn, #imm] with the immediate in [-4095, 4095]; INT32_MIN encodes
  // #-0. A non-constant immediate is accepted as a label reference.
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  // Post-indexed constant immediate in [-255, 255]; INT32_MIN encodes #-0.
  bool isPostIdxImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  // Post-indexed constant immediate, a multiple of 4 in [-1020, 1020];
  // INT32_MIN encodes #-0.
  bool isPostIdxImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1088
  // NEON operands.
  // Register-list spacing predicates: single-spaced lists use consecutive
  // registers, double-spaced lists skip every other one.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  // Single-spaced lists of one to four D registers.
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  // Two-register double-spaced list.
  bool isVecListTwoQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }

  // All-lanes list forms (k_VectorListAllLanes) of one or two registers.
  bool isVecListOneDAllLanes() const {
    if (Kind != k_VectorListAllLanes) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoDAllLanes() const {
    if (Kind != k_VectorListAllLanes) return false;
    return VectorList.Count == 2;
  }

  // Spacing predicates for lane-indexed lists.
  bool isSingleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorIndexed() const {
    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
  }
  // Lane-indexed lists; the maximum lane index depends on the element
  // width (7 for bytes, 3 for halfwords, 1 for words).
  bool isVecListOneDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoQWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoQHWordIndexed() const {
    if (!isDoubleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (!isSingleSpacedVectorIndexed()) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

  // Scalar lane indices; the bound depends on element width (8 lanes of
  // i8, 4 of i16, 2 of i32 per register).
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }
1189
  // NEON modified-immediate predicates: each requires a constant immediate
  // and checks it against the set of values the corresponding encoding can
  // represent.
  bool isNEONi8splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
    // value.
    return Value >= 0 && Value < 256;
  }

  bool isNEONi16splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  bool isNEONi32splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }

  bool isNEONi32vmov() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
  // Like isNEONi32vmov, but tests the bitwise complement of the immediate.
  bool isNEONi32vmovNeg() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
1259
1260  bool isNEONi64splat() const {
1261    if (Kind != k_Immediate)
1262      return false;
1263    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1264    // Must be a constant.
1265    if (!CE) return false;
1266    uint64_t Value = CE->getValue();
1267    // i64 value with each byte being either 0 or 0xff.
1268    for (unsigned i = 0; i < 8; ++i)
1269      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1270    return true;
1271  }
1272
1273  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1274    // Add as immediates when possible.  Null MCExpr = 0.
1275    if (Expr == 0)
1276      Inst.addOperand(MCOperand::CreateImm(0));
1277    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1278      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1279    else
1280      Inst.addOperand(MCOperand::CreateExpr(Expr));
1281  }
1282
1283  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1284    assert(N == 2 && "Invalid number of operands!");
1285    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1286    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1287    Inst.addOperand(MCOperand::CreateReg(RegNum));
1288  }
1289
  // Coprocessor number ('p<n>') as an immediate operand.
  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  // Coprocessor register ('c<n>') as an immediate operand.
  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  // Coprocessor option value as an immediate operand.
  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  // IT-block condition mask.
  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  // Condition code for an IT instruction (no CPSR register operand).
  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  // Optional CC-out register operand.
  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Plain register operand.
  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  // Register shifted by a register: source reg, shift reg, then the
  // packed shift opcode/amount from getSORegOpc.
  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  // Register shifted by an immediate: source reg, then the packed shift
  // opcode/amount from getSORegOpc.
  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  // Shifter immediate: bit 5 carries the ASR flag, the low bits the
  // shift amount.
  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }
1349
  // Register list: one register operand per list element, in list order.
  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  // DPR and SPR register lists share the generic list emission.
  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }
1365
  // Rotate immediate operand.
  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  // Bitfield descriptor, emitted as the 32-bit clear mask built from
  // lsb/width.
  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  // Generic immediate, via addExpr (constants fold to plain immediates).
  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  // Floating-point immediate, already encoded, as an immediate operand.
  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  // Immediate for LDRD/STRD-style operands; emitted unscaled.
  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  // Immediate in [0, 1020], stored divided by 4.
  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  // Immediate in [0, 508], stored divided by 4.
  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  // Immediates encoded as value-1.
  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  // Thumb shift-right amount: 32 encodes as 0.
  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }
1441
  // PKH 'asr #imm' amount: 32 encodes as 0.
  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  // t2_so_imm written in the source as its bitwise NOT: emit ~value.
  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  // t2_so_imm written in the source as its arithmetic negation: emit
  // -value.
  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  // so_imm written in the source as its bitwise NOT: emit ~value.
  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  // so_imm written in the source as its arithmetic negation: emit -value.
  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }
1482
  // Memory barrier option as an immediate operand.
  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  // Offset-less memory operand: just the base register.
  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  // Aligned memory operand: base register plus the alignment immediate.
  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }
1498
  // ARM addrmode2: base reg, offset reg (0 if none), and the packed AM2
  // opcode built by getAM2Opc (add/sub flag, magnitude, shift).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Stand-alone AM2 immediate offset: register-0 placeholder plus the
  // packed add/sub flag and magnitude.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1532
  // ARM addrmode3: base reg, offset reg (0 if none), and the packed AM3
  // opcode. A bare immediate is emitted as an expression so a fixup can
  // resolve the label later.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateReg(0));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed AM3 offset: either a post-index register or a constant
  // immediate, emitted as a register (0 for the immediate form) plus the
  // packed AM3 opcode.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1583
  // VFP addrmode5: base reg plus the packed AM5 opcode; the immediate is
  // stored scaled down by 4. A bare immediate is emitted as an expression
  // needing a label fixup.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Base reg plus the raw (unscaled) immediate. A bare immediate is
  // emitted as an expression needing a label fixup.
  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Base reg plus the immediate stored scaled down by 4.
  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }
1629
1630  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1631    assert(N == 2 && "Invalid number of operands!");
1632    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1633    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1634    Inst.addOperand(MCOperand::CreateImm(Val));
1635  }
1636
  // Positive imm8 offsets emit identically to the generic imm8 form; the
  // sign constraint is presumably enforced by the operand predicate — the
  // matcher only routes positive offsets here (TODO confirm).
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
1640
  // Negative imm8 offsets also emit via the generic imm8 form; the sign
  // distinction lives in the operand class, not the encoding done here.
  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }
1644
1645  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1646    assert(N == 2 && "Invalid number of operands!");
1647    // If this is an immediate, it's a label reference.
1648    if (Kind == k_Immediate) {
1649      addExpr(Inst, getImm());
1650      Inst.addOperand(MCOperand::CreateImm(0));
1651      return;
1652    }
1653
1654    // Otherwise, it's a normal memory reg+offset.
1655    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1656    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1657    Inst.addOperand(MCOperand::CreateImm(Val));
1658  }
1659
1660  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1661    assert(N == 2 && "Invalid number of operands!");
1662    // If this is an immediate, it's a label reference.
1663    if (Kind == k_Immediate) {
1664      addExpr(Inst, getImm());
1665      Inst.addOperand(MCOperand::CreateImm(0));
1666      return;
1667    }
1668
1669    // Otherwise, it's a normal memory reg+offset.
1670    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1671    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1672    Inst.addOperand(MCOperand::CreateImm(Val));
1673  }
1674
  // TBB [Rn, Rm]: table base register followed by the byte-index register.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1680
  // TBH [Rn, Rm, lsl #1]: table base register followed by the halfword-index
  // register (the lsl #1 is implicit in the instruction, not encoded here).
  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1686
1687  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1688    assert(N == 3 && "Invalid number of operands!");
1689    unsigned Val =
1690      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1691                        Memory.ShiftImm, Memory.ShiftType);
1692    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1693    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1694    Inst.addOperand(MCOperand::CreateImm(Val));
1695  }
1696
  // Thumb2 register-offset form: base, offset register, and the left-shift
  // amount applied to the offset register (no add/sub direction in T2).
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }
1703
  // Thumb [Rn, Rm] form: just the two registers, no shift or offset.
  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }
1709
1710  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1711    assert(N == 2 && "Invalid number of operands!");
1712    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1713    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1714    Inst.addOperand(MCOperand::CreateImm(Val));
1715  }
1716
1717  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1718    assert(N == 2 && "Invalid number of operands!");
1719    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1720    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1721    Inst.addOperand(MCOperand::CreateImm(Val));
1722  }
1723
1724  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1725    assert(N == 2 && "Invalid number of operands!");
1726    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1727    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1728    Inst.addOperand(MCOperand::CreateImm(Val));
1729  }
1730
1731  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1732    assert(N == 2 && "Invalid number of operands!");
1733    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1734    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1735    Inst.addOperand(MCOperand::CreateImm(Val));
1736  }
1737
1738  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1739    assert(N == 1 && "Invalid number of operands!");
1740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1741    assert(CE && "non-constant post-idx-imm8 operand!");
1742    int Imm = CE->getValue();
1743    bool isAdd = Imm >= 0;
1744    if (Imm == INT32_MIN) Imm = 0;
1745    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1746    Inst.addOperand(MCOperand::CreateImm(Imm));
1747  }
1748
1749  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1750    assert(N == 1 && "Invalid number of operands!");
1751    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1752    assert(CE && "non-constant post-idx-imm8s4 operand!");
1753    int Imm = CE->getValue();
1754    bool isAdd = Imm >= 0;
1755    if (Imm == INT32_MIN) Imm = 0;
1756    // Immediate is scaled by 4.
1757    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1758    Inst.addOperand(MCOperand::CreateImm(Imm));
1759  }
1760
  // Post-indexed register: the register itself plus a boolean add/sub flag.
  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }
1766
1767  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1768    assert(N == 2 && "Invalid number of operands!");
1769    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1770    // The sign, shift type, and shift amount are encoded in a single operand
1771    // using the AM2 encoding helpers.
1772    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1773    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1774                                     PostIdxReg.ShiftTy);
1775    Inst.addOperand(MCOperand::CreateImm(Imm));
1776  }
1777
  // MSR mask operand: emitted directly as its numeric value.
  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }
1782
  // CPS interrupt-flags operand (a/i/f bits) emitted as a plain immediate.
  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }
1787
  // Vector list: only the (super-)register number is encoded; the element
  // count is implied by the register class of RegNum.
  void addVecListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }
1792
  // Indexed vector list: register followed by the lane index.
  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
  }
1798
  // 8-bit-element lane index; the range was validated during classification.
  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1803
  // 16-bit-element lane index; the range was validated during classification.
  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1808
  // 32-bit-element lane index; the range was validated during classification.
  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }
1813
1814  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1815    assert(N == 1 && "Invalid number of operands!");
1816    // The immediate encodes the type of constant as well as the value.
1817    // Mask in that this is an i8 splat.
1818    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1819    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1820  }
1821
1822  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1823    assert(N == 1 && "Invalid number of operands!");
1824    // The immediate encodes the type of constant as well as the value.
1825    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1826    unsigned Value = CE->getValue();
1827    if (Value >= 256)
1828      Value = (Value >> 8) | 0xa00;
1829    else
1830      Value |= 0x800;
1831    Inst.addOperand(MCOperand::CreateImm(Value));
1832  }
1833
1834  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1835    assert(N == 1 && "Invalid number of operands!");
1836    // The immediate encodes the type of constant as well as the value.
1837    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1838    unsigned Value = CE->getValue();
1839    if (Value >= 256 && Value <= 0xff00)
1840      Value = (Value >> 8) | 0x200;
1841    else if (Value > 0xffff && Value <= 0xff0000)
1842      Value = (Value >> 16) | 0x400;
1843    else if (Value > 0xffffff)
1844      Value = (Value >> 24) | 0x600;
1845    Inst.addOperand(MCOperand::CreateImm(Value));
1846  }
1847
1848  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1849    assert(N == 1 && "Invalid number of operands!");
1850    // The immediate encodes the type of constant as well as the value.
1851    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1852    unsigned Value = CE->getValue();
1853    if (Value >= 256 && Value <= 0xffff)
1854      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1855    else if (Value > 0xffff && Value <= 0xffffff)
1856      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1857    else if (Value > 0xffffff)
1858      Value = (Value >> 24) | 0x600;
1859    Inst.addOperand(MCOperand::CreateImm(Value));
1860  }
1861
1862  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1863    assert(N == 1 && "Invalid number of operands!");
1864    // The immediate encodes the type of constant as well as the value.
1865    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1866    unsigned Value = ~CE->getValue();
1867    if (Value >= 256 && Value <= 0xffff)
1868      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1869    else if (Value > 0xffff && Value <= 0xffffff)
1870      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1871    else if (Value > 0xffffff)
1872      Value = (Value >> 24) | 0x600;
1873    Inst.addOperand(MCOperand::CreateImm(Value));
1874  }
1875
1876  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1877    assert(N == 1 && "Invalid number of operands!");
1878    // The immediate encodes the type of constant as well as the value.
1879    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1880    uint64_t Value = CE->getValue();
1881    unsigned Imm = 0;
1882    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1883      Imm |= (Value & 1) << i;
1884    }
1885    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1886  }
1887
1888  virtual void print(raw_ostream &OS) const;
1889
1890  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1891    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1892    Op->ITMask.Mask = Mask;
1893    Op->StartLoc = S;
1894    Op->EndLoc = S;
1895    return Op;
1896  }
1897
1898  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1899    ARMOperand *Op = new ARMOperand(k_CondCode);
1900    Op->CC.Val = CC;
1901    Op->StartLoc = S;
1902    Op->EndLoc = S;
1903    return Op;
1904  }
1905
1906  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1907    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1908    Op->Cop.Val = CopVal;
1909    Op->StartLoc = S;
1910    Op->EndLoc = S;
1911    return Op;
1912  }
1913
1914  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1915    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1916    Op->Cop.Val = CopVal;
1917    Op->StartLoc = S;
1918    Op->EndLoc = S;
1919    return Op;
1920  }
1921
1922  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1923    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1924    Op->Cop.Val = Val;
1925    Op->StartLoc = S;
1926    Op->EndLoc = E;
1927    return Op;
1928  }
1929
1930  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1931    ARMOperand *Op = new ARMOperand(k_CCOut);
1932    Op->Reg.RegNum = RegNum;
1933    Op->StartLoc = S;
1934    Op->EndLoc = S;
1935    return Op;
1936  }
1937
1938  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1939    ARMOperand *Op = new ARMOperand(k_Token);
1940    Op->Tok.Data = Str.data();
1941    Op->Tok.Length = Str.size();
1942    Op->StartLoc = S;
1943    Op->EndLoc = S;
1944    return Op;
1945  }
1946
1947  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1948    ARMOperand *Op = new ARMOperand(k_Register);
1949    Op->Reg.RegNum = RegNum;
1950    Op->StartLoc = S;
1951    Op->EndLoc = E;
1952    return Op;
1953  }
1954
1955  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1956                                           unsigned SrcReg,
1957                                           unsigned ShiftReg,
1958                                           unsigned ShiftImm,
1959                                           SMLoc S, SMLoc E) {
1960    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1961    Op->RegShiftedReg.ShiftTy = ShTy;
1962    Op->RegShiftedReg.SrcReg = SrcReg;
1963    Op->RegShiftedReg.ShiftReg = ShiftReg;
1964    Op->RegShiftedReg.ShiftImm = ShiftImm;
1965    Op->StartLoc = S;
1966    Op->EndLoc = E;
1967    return Op;
1968  }
1969
1970  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1971                                            unsigned SrcReg,
1972                                            unsigned ShiftImm,
1973                                            SMLoc S, SMLoc E) {
1974    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1975    Op->RegShiftedImm.ShiftTy = ShTy;
1976    Op->RegShiftedImm.SrcReg = SrcReg;
1977    Op->RegShiftedImm.ShiftImm = ShiftImm;
1978    Op->StartLoc = S;
1979    Op->EndLoc = E;
1980    return Op;
1981  }
1982
1983  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1984                                   SMLoc S, SMLoc E) {
1985    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1986    Op->ShifterImm.isASR = isASR;
1987    Op->ShifterImm.Imm = Imm;
1988    Op->StartLoc = S;
1989    Op->EndLoc = E;
1990    return Op;
1991  }
1992
1993  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1994    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1995    Op->RotImm.Imm = Imm;
1996    Op->StartLoc = S;
1997    Op->EndLoc = E;
1998    return Op;
1999  }
2000
2001  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2002                                    SMLoc S, SMLoc E) {
2003    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2004    Op->Bitfield.LSB = LSB;
2005    Op->Bitfield.Width = Width;
2006    Op->StartLoc = S;
2007    Op->EndLoc = E;
2008    return Op;
2009  }
2010
2011  static ARMOperand *
2012  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2013                SMLoc StartLoc, SMLoc EndLoc) {
2014    KindTy Kind = k_RegisterList;
2015
2016    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2017      Kind = k_DPRRegisterList;
2018    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2019             contains(Regs.front().first))
2020      Kind = k_SPRRegisterList;
2021
2022    ARMOperand *Op = new ARMOperand(Kind);
2023    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2024           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2025      Op->Registers.push_back(I->first);
2026    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2027    Op->StartLoc = StartLoc;
2028    Op->EndLoc = EndLoc;
2029    return Op;
2030  }
2031
2032  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2033                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2034    ARMOperand *Op = new ARMOperand(k_VectorList);
2035    Op->VectorList.RegNum = RegNum;
2036    Op->VectorList.Count = Count;
2037    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2038    Op->StartLoc = S;
2039    Op->EndLoc = E;
2040    return Op;
2041  }
2042
2043  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2044                                              SMLoc S, SMLoc E) {
2045    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2046    Op->VectorList.RegNum = RegNum;
2047    Op->VectorList.Count = Count;
2048    Op->StartLoc = S;
2049    Op->EndLoc = E;
2050    return Op;
2051  }
2052
2053  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2054                                             unsigned Index,
2055                                             bool isDoubleSpaced,
2056                                             SMLoc S, SMLoc E) {
2057    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2058    Op->VectorList.RegNum = RegNum;
2059    Op->VectorList.Count = Count;
2060    Op->VectorList.LaneIndex = Index;
2061    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2062    Op->StartLoc = S;
2063    Op->EndLoc = E;
2064    return Op;
2065  }
2066
  // Bare vector lane index ("[N]").
  // NOTE(review): the MCContext parameter is unused here; presumably kept
  // for signature symmetry with other factories — confirm before removing.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }
2075
2076  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2077    ARMOperand *Op = new ARMOperand(k_Immediate);
2078    Op->Imm.Val = Val;
2079    Op->StartLoc = S;
2080    Op->EndLoc = E;
2081    return Op;
2082  }
2083
  // Encoded VFP immediate (the 8-bit ARM FP-immediate encoding, not a raw
  // float value).
  // NOTE(review): the MCContext parameter is unused here; presumably kept
  // for signature symmetry with other factories — confirm before removing.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
2091
2092  static ARMOperand *CreateMem(unsigned BaseRegNum,
2093                               const MCConstantExpr *OffsetImm,
2094                               unsigned OffsetRegNum,
2095                               ARM_AM::ShiftOpc ShiftType,
2096                               unsigned ShiftImm,
2097                               unsigned Alignment,
2098                               bool isNegative,
2099                               SMLoc S, SMLoc E) {
2100    ARMOperand *Op = new ARMOperand(k_Memory);
2101    Op->Memory.BaseRegNum = BaseRegNum;
2102    Op->Memory.OffsetImm = OffsetImm;
2103    Op->Memory.OffsetRegNum = OffsetRegNum;
2104    Op->Memory.ShiftType = ShiftType;
2105    Op->Memory.ShiftImm = ShiftImm;
2106    Op->Memory.Alignment = Alignment;
2107    Op->Memory.isNegative = isNegative;
2108    Op->StartLoc = S;
2109    Op->EndLoc = E;
2110    return Op;
2111  }
2112
2113  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2114                                      ARM_AM::ShiftOpc ShiftTy,
2115                                      unsigned ShiftImm,
2116                                      SMLoc S, SMLoc E) {
2117    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2118    Op->PostIdxReg.RegNum = RegNum;
2119    Op->PostIdxReg.isAdd = isAdd;
2120    Op->PostIdxReg.ShiftTy = ShiftTy;
2121    Op->PostIdxReg.ShiftImm = ShiftImm;
2122    Op->StartLoc = S;
2123    Op->EndLoc = E;
2124    return Op;
2125  }
2126
2127  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2128    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2129    Op->MBOpt.Val = Opt;
2130    Op->StartLoc = S;
2131    Op->EndLoc = S;
2132    return Op;
2133  }
2134
2135  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2136    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2137    Op->IFlags.Val = IFlags;
2138    Op->StartLoc = S;
2139    Op->EndLoc = S;
2140    return Op;
2141  }
2142
2143  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2144    ARMOperand *Op = new ARMOperand(k_MSRMask);
2145    Op->MMask.Val = MMask;
2146    Op->StartLoc = S;
2147    Op->EndLoc = S;
2148    return Op;
2149  }
2150};
2151
2152} // end anonymous namespace.
2153
2154void ARMOperand::print(raw_ostream &OS) const {
2155  switch (Kind) {
2156  case k_FPImmediate:
2157    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2158       << ") >";
2159    break;
2160  case k_CondCode:
2161    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2162    break;
2163  case k_CCOut:
2164    OS << "<ccout " << getReg() << ">";
2165    break;
2166  case k_ITCondMask: {
2167    static const char *MaskStr[] = {
2168      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2169      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2170    };
2171    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2172    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2173    break;
2174  }
2175  case k_CoprocNum:
2176    OS << "<coprocessor number: " << getCoproc() << ">";
2177    break;
2178  case k_CoprocReg:
2179    OS << "<coprocessor register: " << getCoproc() << ">";
2180    break;
2181  case k_CoprocOption:
2182    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2183    break;
2184  case k_MSRMask:
2185    OS << "<mask: " << getMSRMask() << ">";
2186    break;
2187  case k_Immediate:
2188    getImm()->print(OS);
2189    break;
2190  case k_MemBarrierOpt:
2191    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2192    break;
2193  case k_Memory:
2194    OS << "<memory "
2195       << " base:" << Memory.BaseRegNum;
2196    OS << ">";
2197    break;
2198  case k_PostIndexRegister:
2199    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2200       << PostIdxReg.RegNum;
2201    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2202      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2203         << PostIdxReg.ShiftImm;
2204    OS << ">";
2205    break;
2206  case k_ProcIFlags: {
2207    OS << "<ARM_PROC::";
2208    unsigned IFlags = getProcIFlags();
2209    for (int i=2; i >= 0; --i)
2210      if (IFlags & (1 << i))
2211        OS << ARM_PROC::IFlagsToString(1 << i);
2212    OS << ">";
2213    break;
2214  }
2215  case k_Register:
2216    OS << "<register " << getReg() << ">";
2217    break;
2218  case k_ShifterImmediate:
2219    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2220       << " #" << ShifterImm.Imm << ">";
2221    break;
2222  case k_ShiftedRegister:
2223    OS << "<so_reg_reg "
2224       << RegShiftedReg.SrcReg << " "
2225       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2226       << " " << RegShiftedReg.ShiftReg << ">";
2227    break;
2228  case k_ShiftedImmediate:
2229    OS << "<so_reg_imm "
2230       << RegShiftedImm.SrcReg << " "
2231       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2232       << " #" << RegShiftedImm.ShiftImm << ">";
2233    break;
2234  case k_RotateImmediate:
2235    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2236    break;
2237  case k_BitfieldDescriptor:
2238    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2239       << ", width: " << Bitfield.Width << ">";
2240    break;
2241  case k_RegisterList:
2242  case k_DPRRegisterList:
2243  case k_SPRRegisterList: {
2244    OS << "<register_list ";
2245
2246    const SmallVectorImpl<unsigned> &RegList = getRegList();
2247    for (SmallVectorImpl<unsigned>::const_iterator
2248           I = RegList.begin(), E = RegList.end(); I != E; ) {
2249      OS << *I;
2250      if (++I < E) OS << ", ";
2251    }
2252
2253    OS << ">";
2254    break;
2255  }
2256  case k_VectorList:
2257    OS << "<vector_list " << VectorList.Count << " * "
2258       << VectorList.RegNum << ">";
2259    break;
2260  case k_VectorListAllLanes:
2261    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2262       << VectorList.RegNum << ">";
2263    break;
2264  case k_VectorListIndexed:
2265    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2266       << VectorList.Count << " * " << VectorList.RegNum << ">";
2267    break;
2268  case k_Token:
2269    OS << "'" << getToken() << "'";
2270    break;
2271  case k_VectorIndex:
2272    OS << "<vectorindex " << getVectorIndex() << ">";
2273    break;
2274  }
2275}
2276
2277/// @name Auto-generated Match Functions
2278/// {
2279
2280static unsigned MatchRegisterName(StringRef Name);
2281
2282/// }
2283
2284bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2285                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2286  StartLoc = Parser.getTok().getLoc();
2287  RegNo = tryParseRegister();
2288  EndLoc = Parser.getTok().getLoc();
2289
2290  return (RegNo == (unsigned)-1);
2291}
2292
2293/// Try to parse a register name.  The token must be an Identifier when called,
2294/// and if it is a register name the token is eaten and the register number is
2295/// returned.  Otherwise return -1.
2296///
2297int ARMAsmParser::tryParseRegister() {
2298  const AsmToken &Tok = Parser.getTok();
2299  if (Tok.isNot(AsmToken::Identifier)) return -1;
2300
2301  std::string lowerCase = Tok.getString().lower();
2302  unsigned RegNum = MatchRegisterName(lowerCase);
2303  if (!RegNum) {
2304    RegNum = StringSwitch<unsigned>(lowerCase)
2305      .Case("r13", ARM::SP)
2306      .Case("r14", ARM::LR)
2307      .Case("r15", ARM::PC)
2308      .Case("ip", ARM::R12)
2309      // Additional register name aliases for 'gas' compatibility.
2310      .Case("a1", ARM::R0)
2311      .Case("a2", ARM::R1)
2312      .Case("a3", ARM::R2)
2313      .Case("a4", ARM::R3)
2314      .Case("v1", ARM::R4)
2315      .Case("v2", ARM::R5)
2316      .Case("v3", ARM::R6)
2317      .Case("v4", ARM::R7)
2318      .Case("v5", ARM::R8)
2319      .Case("v6", ARM::R9)
2320      .Case("v7", ARM::R10)
2321      .Case("v8", ARM::R11)
2322      .Case("sb", ARM::R9)
2323      .Case("sl", ARM::R10)
2324      .Case("fp", ARM::R11)
2325      .Default(0);
2326  }
2327  if (!RegNum) {
2328    // Check for aliases registered via .req. Canonicalize to lower case.
2329    // That's more consistent since register names are case insensitive, and
2330    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2331    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2332    // If no match, return failure.
2333    if (Entry == RegisterReqs.end())
2334      return -1;
2335    Parser.Lex(); // Eat identifier token.
2336    return Entry->getValue();
2337  }
2338
2339  Parser.Lex(); // Eat identifier token.
2340
2341  return RegNum;
2342}
2343
// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Map the mnemonic to a shift type; "asl" is accepted as an alias of lsl.
  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("asl", ARM_AM::lsl)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shifter at all: recoverable, nothing was consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true (== 1, "recoverable"), yet the
    // shift token has been eaten and the previous operand popped and
    // destroyed — confirm callers treat this path correctly.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash) ||
        Parser.getTok().is(AsmToken::Dollar)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
                    "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Replace the popped source register with the combined shifted-register
  // (or shifted-by-immediate) operand.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}
2435
2436
2437/// Try to parse a register name.  The token must be an Identifier when called.
2438/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2439/// if there is a "writeback". 'true' if it's not a register.
2440///
2441/// TODO this is likely to change to allow different register types and or to
2442/// parse for a specific register type.
2443bool ARMAsmParser::
2444tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2445  SMLoc S = Parser.getTok().getLoc();
2446  int RegNo = tryParseRegister();
2447  if (RegNo == -1)
2448    return true;
2449
2450  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2451
2452  const AsmToken &ExclaimTok = Parser.getTok();
2453  if (ExclaimTok.is(AsmToken::Exclaim)) {
2454    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2455                                               ExclaimTok.getLoc()));
2456    Parser.Lex(); // Eat exclaim token
2457    return false;
2458  }
2459
2460  // Also check for an index operand. This is only legal for vector registers,
2461  // but that'll get caught OK in operand matching, so we don't need to
2462  // explicitly filter everything else out here.
2463  if (Parser.getTok().is(AsmToken::LBrac)) {
2464    SMLoc SIdx = Parser.getTok().getLoc();
2465    Parser.Lex(); // Eat left bracket token.
2466
2467    const MCExpr *ImmVal;
2468    if (getParser().ParseExpression(ImmVal))
2469      return MatchOperand_ParseFail;
2470    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2471    if (!MCE) {
2472      TokError("immediate value expected for vector index");
2473      return MatchOperand_ParseFail;
2474    }
2475
2476    SMLoc E = Parser.getTok().getLoc();
2477    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2478      Error(E, "']' expected");
2479      return MatchOperand_ParseFail;
2480    }
2481
2482    Parser.Lex(); // Eat right bracket token.
2483
2484    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2485                                                     SIdx, E,
2486                                                     getContext()));
2487  }
2488
2489  return false;
2490}
2491
2492/// MatchCoprocessorOperandName - Try to parse an coprocessor related
2493/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2494/// "c5", ...
2495static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2496  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2497  // but efficient.
2498  switch (Name.size()) {
2499  default: break;
2500  case 2:
2501    if (Name[0] != CoprocOp)
2502      return -1;
2503    switch (Name[1]) {
2504    default:  return -1;
2505    case '0': return 0;
2506    case '1': return 1;
2507    case '2': return 2;
2508    case '3': return 3;
2509    case '4': return 4;
2510    case '5': return 5;
2511    case '6': return 6;
2512    case '7': return 7;
2513    case '8': return 8;
2514    case '9': return 9;
2515    }
2516    break;
2517  case 3:
2518    if (Name[0] != CoprocOp || Name[1] != '1')
2519      return -1;
2520    switch (Name[2]) {
2521    default:  return -1;
2522    case '0': return 10;
2523    case '1': return 11;
2524    case '2': return 12;
2525    case '3': return 13;
2526    case '4': return 14;
2527    case '5': return 15;
2528    }
2529    break;
2530  }
2531
2532  return -1;
2533}
2534
2535/// parseITCondCode - Try to parse a condition code for an IT instruction.
2536ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2537parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2538  SMLoc S = Parser.getTok().getLoc();
2539  const AsmToken &Tok = Parser.getTok();
2540  if (!Tok.is(AsmToken::Identifier))
2541    return MatchOperand_NoMatch;
2542  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2543    .Case("eq", ARMCC::EQ)
2544    .Case("ne", ARMCC::NE)
2545    .Case("hs", ARMCC::HS)
2546    .Case("cs", ARMCC::HS)
2547    .Case("lo", ARMCC::LO)
2548    .Case("cc", ARMCC::LO)
2549    .Case("mi", ARMCC::MI)
2550    .Case("pl", ARMCC::PL)
2551    .Case("vs", ARMCC::VS)
2552    .Case("vc", ARMCC::VC)
2553    .Case("hi", ARMCC::HI)
2554    .Case("ls", ARMCC::LS)
2555    .Case("ge", ARMCC::GE)
2556    .Case("lt", ARMCC::LT)
2557    .Case("gt", ARMCC::GT)
2558    .Case("le", ARMCC::LE)
2559    .Case("al", ARMCC::AL)
2560    .Default(~0U);
2561  if (CC == ~0U)
2562    return MatchOperand_NoMatch;
2563  Parser.Lex(); // Eat the token.
2564
2565  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2566
2567  return MatchOperand_Success;
2568}
2569
2570/// parseCoprocNumOperand - Try to parse an coprocessor number operand. The
2571/// token must be an Identifier when called, and if it is a coprocessor
2572/// number, the token is eaten and the operand is added to the operand list.
2573ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2574parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2575  SMLoc S = Parser.getTok().getLoc();
2576  const AsmToken &Tok = Parser.getTok();
2577  if (Tok.isNot(AsmToken::Identifier))
2578    return MatchOperand_NoMatch;
2579
2580  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2581  if (Num == -1)
2582    return MatchOperand_NoMatch;
2583
2584  Parser.Lex(); // Eat identifier token.
2585  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2586  return MatchOperand_Success;
2587}
2588
2589/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
2590/// token must be an Identifier when called, and if it is a coprocessor
2591/// number, the token is eaten and the operand is added to the operand list.
2592ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2593parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2594  SMLoc S = Parser.getTok().getLoc();
2595  const AsmToken &Tok = Parser.getTok();
2596  if (Tok.isNot(AsmToken::Identifier))
2597    return MatchOperand_NoMatch;
2598
2599  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2600  if (Reg == -1)
2601    return MatchOperand_NoMatch;
2602
2603  Parser.Lex(); // Eat identifier token.
2604  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2605  return MatchOperand_Success;
2606}
2607
2608/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
2609/// coproc_option : '{' imm0_255 '}'
2610ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2611parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2612  SMLoc S = Parser.getTok().getLoc();
2613
2614  // If this isn't a '{', this isn't a coprocessor immediate operand.
2615  if (Parser.getTok().isNot(AsmToken::LCurly))
2616    return MatchOperand_NoMatch;
2617  Parser.Lex(); // Eat the '{'
2618
2619  const MCExpr *Expr;
2620  SMLoc Loc = Parser.getTok().getLoc();
2621  if (getParser().ParseExpression(Expr)) {
2622    Error(Loc, "illegal expression");
2623    return MatchOperand_ParseFail;
2624  }
2625  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2626  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2627    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2628    return MatchOperand_ParseFail;
2629  }
2630  int Val = CE->getValue();
2631
2632  // Check for and consume the closing '}'
2633  if (Parser.getTok().isNot(AsmToken::RCurly))
2634    return MatchOperand_ParseFail;
2635  SMLoc E = Parser.getTok().getLoc();
2636  Parser.Lex(); // Eat the '}'
2637
2638  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2639  return MatchOperand_Success;
2640}
2641
2642// For register list parsing, we need to map from raw GPR register numbering
2643// to the enumeration values. The enumeration values aren't sorted by
2644// register number due to our using "sp", "lr" and "pc" as canonical names.
2645static unsigned getNextRegister(unsigned Reg) {
2646  // If this is a GPR, we need to do it manually, otherwise we can rely
2647  // on the sort ordering of the enumeration since the other reg-classes
2648  // are sane.
2649  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2650    return Reg + 1;
2651  switch(Reg) {
2652  default: assert(0 && "Invalid GPR number!");
2653  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2654  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2655  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2656  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2657  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2658  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2659  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2660  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2661  }
2662}
2663
2664// Return the low-subreg of a given Q register.
2665static unsigned getDRegFromQReg(unsigned QReg) {
2666  switch (QReg) {
2667  default: llvm_unreachable("expected a Q register!");
2668  case ARM::Q0:  return ARM::D0;
2669  case ARM::Q1:  return ARM::D2;
2670  case ARM::Q2:  return ARM::D4;
2671  case ARM::Q3:  return ARM::D6;
2672  case ARM::Q4:  return ARM::D8;
2673  case ARM::Q5:  return ARM::D10;
2674  case ARM::Q6:  return ARM::D12;
2675  case ARM::Q7:  return ARM::D14;
2676  case ARM::Q8:  return ARM::D16;
2677  case ARM::Q9:  return ARM::D18;
2678  case ARM::Q10: return ARM::D20;
2679  case ARM::Q11: return ARM::D22;
2680  case ARM::Q12: return ARM::D24;
2681  case ARM::Q13: return ARM::D26;
2682  case ARM::Q14: return ARM::D28;
2683  case ARM::Q15: return ARM::D30;
2684  }
2685}
2686
/// Parse a register list operand, e.g. "{r0, r1, r4-r7}". The current token
/// must be the opening '{'. GPR, DPR and SPR lists are supported; Q registers
/// are accepted and expanded into their two D sub-registers. On success a
/// reg-list operand (and, for the ARM LDM/STM system-instruction variants, a
/// trailing "^" token operand) is pushed onto Operands and false is returned.
/// On error a diagnostic is emitted and true is returned.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  // Determine the register class from the first register; every later
  // register must come from the same class.
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax: "<first>-<last>".
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      // getNextRegister handles the non-monotonic GPR enum ordering
      // (sp/lr/pc).
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    const AsmToken RegTok = Parser.getTok();
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // A duplicate is only a warning; the register is not pushed again.
    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
              ") in register list");
      continue;
    }
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  // Push the register list operand.
  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));

  // The ARM system instruction variants for LDM/STM have a '^' token here.
  if (Parser.getTok().is(AsmToken::Caret)) {
    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat '^' token.
  }

  return false;
}
2808
2809// Helper function to parse the lane index for vector lists.
2810ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2811parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2812  Index = 0; // Always return a defined index value.
2813  if (Parser.getTok().is(AsmToken::LBrac)) {
2814    Parser.Lex(); // Eat the '['.
2815    if (Parser.getTok().is(AsmToken::RBrac)) {
2816      // "Dn[]" is the 'all lanes' syntax.
2817      LaneKind = AllLanes;
2818      Parser.Lex(); // Eat the ']'.
2819      return MatchOperand_Success;
2820    }
2821    if (Parser.getTok().is(AsmToken::Integer)) {
2822      int64_t Val = Parser.getTok().getIntVal();
2823      // Make this range check context sensitive for .8, .16, .32.
2824      if (Val < 0 && Val > 7)
2825        Error(Parser.getTok().getLoc(), "lane index out of range");
2826      Index = Val;
2827      LaneKind = IndexedLane;
2828      Parser.Lex(); // Eat the token;
2829      if (Parser.getTok().isNot(AsmToken::RBrac))
2830        Error(Parser.getTok().getLoc(), "']' expected");
2831      Parser.Lex(); // Eat the ']'.
2832      return MatchOperand_Success;
2833    }
2834    Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2835    return MatchOperand_ParseFail;
2836  }
2837  LaneKind = NoLanes;
2838  return MatchOperand_Success;
2839}
2840
// parse a vector register list
// Handles three syntaxes: a bare D register (single-entry list), a bare Q
// register (two-entry list), and the general "{...}" form with optional
// register ranges, Q-register expansion, and a per-register lane specifier
// ("[]" or "[<n>]") that must be identical for every entry.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  VectorLaneTy LaneKind;
  unsigned LaneIndex;
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without encosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    SMLoc E = Parser.getTok().getLoc();
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      default:
        assert(0 && "unexpected lane kind!");
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      // A bare Q register is treated as a two-entry list of its D
      // sub-registers.
      Reg = getDRegFromQReg(Reg);
      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
      if (Res != MatchOperand_Success)
        return Res;
      switch (LaneKind) {
      default:
        assert(0 && "unexpected lane kind!");
      case NoLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
        break;
      case AllLanes:
        E = Parser.getTok().getLoc();
        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
        break;
      case IndexedLane:
        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
                                                               LaneIndex,
                                                               false, S, E));
        break;
      }
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  // Spacing: 0 = unknown yet, 1 = single-spaced, 2 = double-spaced.
  int Spacing = 0;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    Spacing = 1; // double-spacing requires explicit D registers, otherwise
                 // it's ambiguous with four-register single spaced.
    ++Reg;
    ++Count;
  }
  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
    return MatchOperand_ParseFail;

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      // Range syntax, e.g. "{d0-d3}".
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(Parser.getTok().getLoc(),
              "sequential registers in double spaced list");
        return MatchOperand_ParseFail;
      }
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      // NOTE(review): this path skips the lane-specifier parse below, so a
      // degenerate range like "d0-d0[1]" is not checked here — confirm
      // intended.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      EndLoc = Parser.getTok().getLoc();

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      if (!Spacing)
        Spacing = 1; // Register range implies a single spaced list.
      else if (Spacing == 2) {
        Error(RegLoc,
              "invalid register in double-spaced list (must be 'D' register')");
        return MatchOperand_ParseFail;
      }
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      // Parse the lane specifier if present.
      VectorLaneTy NextLaneKind;
      unsigned NextLaneIndex;
      SMLoc EndLoc = Parser.getTok().getLoc();
      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
        return MatchOperand_ParseFail;
      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
        Error(EndLoc, "mismatched lane index in register list");
        return MatchOperand_ParseFail;
      }
      continue;
    }
    // Normal D register.
    // Figure out the register spacing (single or double) of the list if
    // we don't know it already.
    if (!Spacing)
      Spacing = 1 + (Reg == OldReg + 2);

    // Just check that it's contiguous and keep going.
    if (Reg != OldReg + Spacing) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
    // Parse the lane specifier if present.
    VectorLaneTy NextLaneKind;
    unsigned NextLaneIndex;
    SMLoc EndLoc = Parser.getTok().getLoc();
    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
      return MatchOperand_ParseFail;
    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
      Error(EndLoc, "mismatched lane index in register list");
      return MatchOperand_ParseFail;
    }
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  // Build the operand that matches the lane style seen on every entry.
  switch (LaneKind) {
  default:
    assert(0 && "unexpected lane kind in register list.");
  case NoLanes:
    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
                                                    (Spacing == 2), S, E));
    break;
  case AllLanes:
    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
                                                            S, E));
    break;
  case IndexedLane:
    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
                                                           LaneIndex,
                                                           (Spacing == 2),
                                                           S, E));
    break;
  }
  return MatchOperand_Success;
}
3074
3075/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
3076ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3077parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3078  SMLoc S = Parser.getTok().getLoc();
3079  const AsmToken &Tok = Parser.getTok();
3080  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3081  StringRef OptStr = Tok.getString();
3082
3083  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3084    .Case("sy",    ARM_MB::SY)
3085    .Case("st",    ARM_MB::ST)
3086    .Case("sh",    ARM_MB::ISH)
3087    .Case("ish",   ARM_MB::ISH)
3088    .Case("shst",  ARM_MB::ISHST)
3089    .Case("ishst", ARM_MB::ISHST)
3090    .Case("nsh",   ARM_MB::NSH)
3091    .Case("un",    ARM_MB::NSH)
3092    .Case("nshst", ARM_MB::NSHST)
3093    .Case("unst",  ARM_MB::NSHST)
3094    .Case("osh",   ARM_MB::OSH)
3095    .Case("oshst", ARM_MB::OSHST)
3096    .Default(~0U);
3097
3098  if (Opt == ~0U)
3099    return MatchOperand_NoMatch;
3100
3101  Parser.Lex(); // Eat identifier token.
3102  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3103  return MatchOperand_Success;
3104}
3105
3106/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
3107ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3108parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3109  SMLoc S = Parser.getTok().getLoc();
3110  const AsmToken &Tok = Parser.getTok();
3111  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3112  StringRef IFlagsStr = Tok.getString();
3113
3114  // An iflags string of "none" is interpreted to mean that none of the AIF
3115  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3116  unsigned IFlags = 0;
3117  if (IFlagsStr != "none") {
3118        for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3119      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3120        .Case("a", ARM_PROC::A)
3121        .Case("i", ARM_PROC::I)
3122        .Case("f", ARM_PROC::F)
3123        .Default(~0U);
3124
3125      // If some specific iflag is already set, it means that some letter is
3126      // present more than once, this is not acceptable.
3127      if (Flag == ~0U || (IFlags & Flag))
3128        return MatchOperand_NoMatch;
3129
3130      IFlags |= Flag;
3131    }
3132  }
3133
3134  Parser.Lex(); // Eat identifier token.
3135  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3136  return MatchOperand_Success;
3137}
3138
3139/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
3140ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3141parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3142  SMLoc S = Parser.getTok().getLoc();
3143  const AsmToken &Tok = Parser.getTok();
3144  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3145  StringRef Mask = Tok.getString();
3146
3147  if (isMClass()) {
3148    // See ARMv6-M 10.1.1
3149    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3150      .Case("apsr", 0)
3151      .Case("iapsr", 1)
3152      .Case("eapsr", 2)
3153      .Case("xpsr", 3)
3154      .Case("ipsr", 5)
3155      .Case("epsr", 6)
3156      .Case("iepsr", 7)
3157      .Case("msp", 8)
3158      .Case("psp", 9)
3159      .Case("primask", 16)
3160      .Case("basepri", 17)
3161      .Case("basepri_max", 18)
3162      .Case("faultmask", 19)
3163      .Case("control", 20)
3164      .Default(~0U);
3165
3166    if (FlagsVal == ~0U)
3167      return MatchOperand_NoMatch;
3168
3169    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3170      // basepri, basepri_max and faultmask only valid for V7m.
3171      return MatchOperand_NoMatch;
3172
3173    Parser.Lex(); // Eat identifier token.
3174    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3175    return MatchOperand_Success;
3176  }
3177
3178  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3179  size_t Start = 0, Next = Mask.find('_');
3180  StringRef Flags = "";
3181  std::string SpecReg = Mask.slice(Start, Next).lower();
3182  if (Next != StringRef::npos)
3183    Flags = Mask.slice(Next+1, Mask.size());
3184
3185  // FlagsVal contains the complete mask:
3186  // 3-0: Mask
3187  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3188  unsigned FlagsVal = 0;
3189
3190  if (SpecReg == "apsr") {
3191    FlagsVal = StringSwitch<unsigned>(Flags)
3192    .Case("nzcvq",  0x8) // same as CPSR_f
3193    .Case("g",      0x4) // same as CPSR_s
3194    .Case("nzcvqg", 0xc) // same as CPSR_fs
3195    .Default(~0U);
3196
3197    if (FlagsVal == ~0U) {
3198      if (!Flags.empty())
3199        return MatchOperand_NoMatch;
3200      else
3201        FlagsVal = 8; // No flag
3202    }
3203  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3204    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3205      Flags = "fc";
3206    for (int i = 0, e = Flags.size(); i != e; ++i) {
3207      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3208      .Case("c", 1)
3209      .Case("x", 2)
3210      .Case("s", 4)
3211      .Case("f", 8)
3212      .Default(~0U);
3213
3214      // If some specific flag is already set, it means that some letter is
3215      // present more than once, this is not acceptable.
3216      if (FlagsVal == ~0U || (FlagsVal & Flag))
3217        return MatchOperand_NoMatch;
3218      FlagsVal |= Flag;
3219    }
3220  } else // No match for special register.
3221    return MatchOperand_NoMatch;
3222
3223  // Special register without flags is NOT equivalent to "fc" flags.
3224  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3225  // two lines would enable gas compatibility at the expense of breaking
3226  // round-tripping.
3227  //
3228  // if (!FlagsVal)
3229  //  FlagsVal = 0x9;
3230
3231  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3232  if (SpecReg == "spsr")
3233    FlagsVal |= 16;
3234
3235  Parser.Lex(); // Eat identifier token.
3236  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3237  return MatchOperand_Success;
3238}
3239
3240ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3241parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3242            int Low, int High) {
3243  const AsmToken &Tok = Parser.getTok();
3244  if (Tok.isNot(AsmToken::Identifier)) {
3245    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3246    return MatchOperand_ParseFail;
3247  }
3248  StringRef ShiftName = Tok.getString();
3249  std::string LowerOp = Op.lower();
3250  std::string UpperOp = Op.upper();
3251  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3252    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3253    return MatchOperand_ParseFail;
3254  }
3255  Parser.Lex(); // Eat shift type token.
3256
3257  // There must be a '#' and a shift amount.
3258  if (Parser.getTok().isNot(AsmToken::Hash) &&
3259      Parser.getTok().isNot(AsmToken::Dollar)) {
3260    Error(Parser.getTok().getLoc(), "'#' expected");
3261    return MatchOperand_ParseFail;
3262  }
3263  Parser.Lex(); // Eat hash token.
3264
3265  const MCExpr *ShiftAmount;
3266  SMLoc Loc = Parser.getTok().getLoc();
3267  if (getParser().ParseExpression(ShiftAmount)) {
3268    Error(Loc, "illegal expression");
3269    return MatchOperand_ParseFail;
3270  }
3271  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3272  if (!CE) {
3273    Error(Loc, "constant expression expected");
3274    return MatchOperand_ParseFail;
3275  }
3276  int Val = CE->getValue();
3277  if (Val < Low || Val > High) {
3278    Error(Loc, "immediate value out of range");
3279    return MatchOperand_ParseFail;
3280  }
3281
3282  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3283
3284  return MatchOperand_Success;
3285}
3286
3287ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3288parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3289  const AsmToken &Tok = Parser.getTok();
3290  SMLoc S = Tok.getLoc();
3291  if (Tok.isNot(AsmToken::Identifier)) {
3292    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3293    return MatchOperand_ParseFail;
3294  }
3295  int Val = StringSwitch<int>(Tok.getString())
3296    .Case("be", 1)
3297    .Case("le", 0)
3298    .Default(-1);
3299  Parser.Lex(); // Eat the token.
3300
3301  if (Val == -1) {
3302    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3303    return MatchOperand_ParseFail;
3304  }
3305  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3306                                                                  getContext()),
3307                                           S, Parser.getTok().getLoc()));
3308  return MatchOperand_Success;
3309}
3310
3311/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3312/// instructions. Legal values are:
3313///     lsl #n  'n' in [0,31]
3314///     asr #n  'n' in [1,32]
3315///             n == 32 encoded as n == 0.
3316ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3317parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3318  const AsmToken &Tok = Parser.getTok();
3319  SMLoc S = Tok.getLoc();
3320  if (Tok.isNot(AsmToken::Identifier)) {
3321    Error(S, "shift operator 'asr' or 'lsl' expected");
3322    return MatchOperand_ParseFail;
3323  }
3324  StringRef ShiftName = Tok.getString();
3325  bool isASR;
3326  if (ShiftName == "lsl" || ShiftName == "LSL")
3327    isASR = false;
3328  else if (ShiftName == "asr" || ShiftName == "ASR")
3329    isASR = true;
3330  else {
3331    Error(S, "shift operator 'asr' or 'lsl' expected");
3332    return MatchOperand_ParseFail;
3333  }
3334  Parser.Lex(); // Eat the operator.
3335
3336  // A '#' and a shift amount.
3337  if (Parser.getTok().isNot(AsmToken::Hash) &&
3338      Parser.getTok().isNot(AsmToken::Dollar)) {
3339    Error(Parser.getTok().getLoc(), "'#' expected");
3340    return MatchOperand_ParseFail;
3341  }
3342  Parser.Lex(); // Eat hash token.
3343
3344  const MCExpr *ShiftAmount;
3345  SMLoc E = Parser.getTok().getLoc();
3346  if (getParser().ParseExpression(ShiftAmount)) {
3347    Error(E, "malformed shift expression");
3348    return MatchOperand_ParseFail;
3349  }
3350  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3351  if (!CE) {
3352    Error(E, "shift amount must be an immediate");
3353    return MatchOperand_ParseFail;
3354  }
3355
3356  int64_t Val = CE->getValue();
3357  if (isASR) {
3358    // Shift amount must be in [1,32]
3359    if (Val < 1 || Val > 32) {
3360      Error(E, "'asr' shift amount must be in range [1,32]");
3361      return MatchOperand_ParseFail;
3362    }
3363    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3364    if (isThumb() && Val == 32) {
3365      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3366      return MatchOperand_ParseFail;
3367    }
3368    if (Val == 32) Val = 0;
3369  } else {
3370    // Shift amount must be in [1,32]
3371    if (Val < 0 || Val > 31) {
3372      Error(E, "'lsr' shift amount must be in range [0,31]");
3373      return MatchOperand_ParseFail;
3374    }
3375  }
3376
3377  E = Parser.getTok().getLoc();
3378  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3379
3380  return MatchOperand_Success;
3381}
3382
3383/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3384/// of instructions. Legal values are:
3385///     ror #n  'n' in {0, 8, 16, 24}
3386ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3387parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3388  const AsmToken &Tok = Parser.getTok();
3389  SMLoc S = Tok.getLoc();
3390  if (Tok.isNot(AsmToken::Identifier))
3391    return MatchOperand_NoMatch;
3392  StringRef ShiftName = Tok.getString();
3393  if (ShiftName != "ror" && ShiftName != "ROR")
3394    return MatchOperand_NoMatch;
3395  Parser.Lex(); // Eat the operator.
3396
3397  // A '#' and a rotate amount.
3398  if (Parser.getTok().isNot(AsmToken::Hash) &&
3399      Parser.getTok().isNot(AsmToken::Dollar)) {
3400    Error(Parser.getTok().getLoc(), "'#' expected");
3401    return MatchOperand_ParseFail;
3402  }
3403  Parser.Lex(); // Eat hash token.
3404
3405  const MCExpr *ShiftAmount;
3406  SMLoc E = Parser.getTok().getLoc();
3407  if (getParser().ParseExpression(ShiftAmount)) {
3408    Error(E, "malformed rotate expression");
3409    return MatchOperand_ParseFail;
3410  }
3411  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3412  if (!CE) {
3413    Error(E, "rotate amount must be an immediate");
3414    return MatchOperand_ParseFail;
3415  }
3416
3417  int64_t Val = CE->getValue();
3418  // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension)
3419  // normally, zero is represented in asm by omitting the rotate operand
3420  // entirely.
3421  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3422    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3423    return MatchOperand_ParseFail;
3424  }
3425
3426  E = Parser.getTok().getLoc();
3427  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3428
3429  return MatchOperand_Success;
3430}
3431
3432ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3433parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3434  SMLoc S = Parser.getTok().getLoc();
3435  // The bitfield descriptor is really two operands, the LSB and the width.
3436  if (Parser.getTok().isNot(AsmToken::Hash) &&
3437      Parser.getTok().isNot(AsmToken::Dollar)) {
3438    Error(Parser.getTok().getLoc(), "'#' expected");
3439    return MatchOperand_ParseFail;
3440  }
3441  Parser.Lex(); // Eat hash token.
3442
3443  const MCExpr *LSBExpr;
3444  SMLoc E = Parser.getTok().getLoc();
3445  if (getParser().ParseExpression(LSBExpr)) {
3446    Error(E, "malformed immediate expression");
3447    return MatchOperand_ParseFail;
3448  }
3449  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3450  if (!CE) {
3451    Error(E, "'lsb' operand must be an immediate");
3452    return MatchOperand_ParseFail;
3453  }
3454
3455  int64_t LSB = CE->getValue();
3456  // The LSB must be in the range [0,31]
3457  if (LSB < 0 || LSB > 31) {
3458    Error(E, "'lsb' operand must be in the range [0,31]");
3459    return MatchOperand_ParseFail;
3460  }
3461  E = Parser.getTok().getLoc();
3462
3463  // Expect another immediate operand.
3464  if (Parser.getTok().isNot(AsmToken::Comma)) {
3465    Error(Parser.getTok().getLoc(), "too few operands");
3466    return MatchOperand_ParseFail;
3467  }
3468  Parser.Lex(); // Eat hash token.
3469  if (Parser.getTok().isNot(AsmToken::Hash) &&
3470      Parser.getTok().isNot(AsmToken::Dollar)) {
3471    Error(Parser.getTok().getLoc(), "'#' expected");
3472    return MatchOperand_ParseFail;
3473  }
3474  Parser.Lex(); // Eat hash token.
3475
3476  const MCExpr *WidthExpr;
3477  if (getParser().ParseExpression(WidthExpr)) {
3478    Error(E, "malformed immediate expression");
3479    return MatchOperand_ParseFail;
3480  }
3481  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3482  if (!CE) {
3483    Error(E, "'width' operand must be an immediate");
3484    return MatchOperand_ParseFail;
3485  }
3486
3487  int64_t Width = CE->getValue();
3488  // The LSB must be in the range [1,32-lsb]
3489  if (Width < 1 || Width > 32 - LSB) {
3490    Error(E, "'width' operand must be in the range [1,32-lsb]");
3491    return MatchOperand_ParseFail;
3492  }
3493  E = Parser.getTok().getLoc();
3494
3495  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3496
3497  return MatchOperand_Success;
3498}
3499
3500ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3501parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3502  // Check for a post-index addressing register operand. Specifically:
3503  // postidx_reg := '+' register {, shift}
3504  //              | '-' register {, shift}
3505  //              | register {, shift}
3506
3507  // This method must return MatchOperand_NoMatch without consuming any tokens
3508  // in the case where there is no match, as other alternatives take other
3509  // parse methods.
3510  AsmToken Tok = Parser.getTok();
3511  SMLoc S = Tok.getLoc();
3512  bool haveEaten = false;
3513  bool isAdd = true;
3514  int Reg = -1;
3515  if (Tok.is(AsmToken::Plus)) {
3516    Parser.Lex(); // Eat the '+' token.
3517    haveEaten = true;
3518  } else if (Tok.is(AsmToken::Minus)) {
3519    Parser.Lex(); // Eat the '-' token.
3520    isAdd = false;
3521    haveEaten = true;
3522  }
3523  if (Parser.getTok().is(AsmToken::Identifier))
3524    Reg = tryParseRegister();
3525  if (Reg == -1) {
3526    if (!haveEaten)
3527      return MatchOperand_NoMatch;
3528    Error(Parser.getTok().getLoc(), "register expected");
3529    return MatchOperand_ParseFail;
3530  }
3531  SMLoc E = Parser.getTok().getLoc();
3532
3533  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3534  unsigned ShiftImm = 0;
3535  if (Parser.getTok().is(AsmToken::Comma)) {
3536    Parser.Lex(); // Eat the ','.
3537    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3538      return MatchOperand_ParseFail;
3539  }
3540
3541  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3542                                                  ShiftImm, S, E));
3543
3544  return MatchOperand_Success;
3545}
3546
3547ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3548parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3549  // Check for a post-index addressing register operand. Specifically:
3550  // am3offset := '+' register
3551  //              | '-' register
3552  //              | register
3553  //              | # imm
3554  //              | # + imm
3555  //              | # - imm
3556
3557  // This method must return MatchOperand_NoMatch without consuming any tokens
3558  // in the case where there is no match, as other alternatives take other
3559  // parse methods.
3560  AsmToken Tok = Parser.getTok();
3561  SMLoc S = Tok.getLoc();
3562
3563  // Do immediates first, as we always parse those if we have a '#'.
3564  if (Parser.getTok().is(AsmToken::Hash) ||
3565      Parser.getTok().is(AsmToken::Dollar)) {
3566    Parser.Lex(); // Eat the '#'.
3567    // Explicitly look for a '-', as we need to encode negative zero
3568    // differently.
3569    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3570    const MCExpr *Offset;
3571    if (getParser().ParseExpression(Offset))
3572      return MatchOperand_ParseFail;
3573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3574    if (!CE) {
3575      Error(S, "constant expression expected");
3576      return MatchOperand_ParseFail;
3577    }
3578    SMLoc E = Tok.getLoc();
3579    // Negative zero is encoded as the flag value INT32_MIN.
3580    int32_t Val = CE->getValue();
3581    if (isNegative && Val == 0)
3582      Val = INT32_MIN;
3583
3584    Operands.push_back(
3585      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3586
3587    return MatchOperand_Success;
3588  }
3589
3590
3591  bool haveEaten = false;
3592  bool isAdd = true;
3593  int Reg = -1;
3594  if (Tok.is(AsmToken::Plus)) {
3595    Parser.Lex(); // Eat the '+' token.
3596    haveEaten = true;
3597  } else if (Tok.is(AsmToken::Minus)) {
3598    Parser.Lex(); // Eat the '-' token.
3599    isAdd = false;
3600    haveEaten = true;
3601  }
3602  if (Parser.getTok().is(AsmToken::Identifier))
3603    Reg = tryParseRegister();
3604  if (Reg == -1) {
3605    if (!haveEaten)
3606      return MatchOperand_NoMatch;
3607    Error(Parser.getTok().getLoc(), "register expected");
3608    return MatchOperand_ParseFail;
3609  }
3610  SMLoc E = Parser.getTok().getLoc();
3611
3612  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3613                                                  0, S, E));
3614
3615  return MatchOperand_Success;
3616}
3617
3618/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3619/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3620/// when they refer multiple MIOperands inside a single one.
3621bool ARMAsmParser::
3622cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3623             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3624  // Rt, Rt2
3625  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3626  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3627  // Create a writeback register dummy placeholder.
3628  Inst.addOperand(MCOperand::CreateReg(0));
3629  // addr
3630  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3631  // pred
3632  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3633  return true;
3634}
3635
3636/// cvtT2StrdPre - Convert parsed operands to MCInst.
3637/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3638/// when they refer multiple MIOperands inside a single one.
3639bool ARMAsmParser::
3640cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3641             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3642  // Create a writeback register dummy placeholder.
3643  Inst.addOperand(MCOperand::CreateReg(0));
3644  // Rt, Rt2
3645  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3646  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3647  // addr
3648  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3649  // pred
3650  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3651  return true;
3652}
3653
3654/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3655/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3656/// when they refer multiple MIOperands inside a single one.
3657bool ARMAsmParser::
3658cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3659                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3660  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3661
3662  // Create a writeback register dummy placeholder.
3663  Inst.addOperand(MCOperand::CreateImm(0));
3664
3665  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3666  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3667  return true;
3668}
3669
3670/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3671/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3672/// when they refer multiple MIOperands inside a single one.
3673bool ARMAsmParser::
3674cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3675                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3676  // Create a writeback register dummy placeholder.
3677  Inst.addOperand(MCOperand::CreateImm(0));
3678  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3679  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3680  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3681  return true;
3682}
3683
3684/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3685/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3686/// when they refer multiple MIOperands inside a single one.
3687bool ARMAsmParser::
3688cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3689                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3690  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3691
3692  // Create a writeback register dummy placeholder.
3693  Inst.addOperand(MCOperand::CreateImm(0));
3694
3695  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3696  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3697  return true;
3698}
3699
3700/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3701/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3702/// when they refer multiple MIOperands inside a single one.
3703bool ARMAsmParser::
3704cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3705                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3706  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3707
3708  // Create a writeback register dummy placeholder.
3709  Inst.addOperand(MCOperand::CreateImm(0));
3710
3711  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3712  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3713  return true;
3714}
3715
3716
3717/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3718/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3719/// when they refer multiple MIOperands inside a single one.
3720bool ARMAsmParser::
3721cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3722                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3723  // Create a writeback register dummy placeholder.
3724  Inst.addOperand(MCOperand::CreateImm(0));
3725  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3726  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3727  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3728  return true;
3729}
3730
3731/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3732/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3733/// when they refer multiple MIOperands inside a single one.
3734bool ARMAsmParser::
3735cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3736                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3737  // Create a writeback register dummy placeholder.
3738  Inst.addOperand(MCOperand::CreateImm(0));
3739  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3740  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3741  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3742  return true;
3743}
3744
3745/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3746/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3747/// when they refer multiple MIOperands inside a single one.
3748bool ARMAsmParser::
3749cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3750                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3751  // Create a writeback register dummy placeholder.
3752  Inst.addOperand(MCOperand::CreateImm(0));
3753  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3754  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3755  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3756  return true;
3757}
3758
3759/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3760/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3761/// when they refer multiple MIOperands inside a single one.
3762bool ARMAsmParser::
3763cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3764                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3765  // Rt
3766  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3767  // Create a writeback register dummy placeholder.
3768  Inst.addOperand(MCOperand::CreateImm(0));
3769  // addr
3770  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3771  // offset
3772  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3773  // pred
3774  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3775  return true;
3776}
3777
3778/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3779/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3780/// when they refer multiple MIOperands inside a single one.
3781bool ARMAsmParser::
3782cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3783                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3784  // Rt
3785  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3786  // Create a writeback register dummy placeholder.
3787  Inst.addOperand(MCOperand::CreateImm(0));
3788  // addr
3789  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3790  // offset
3791  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3792  // pred
3793  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3794  return true;
3795}
3796
3797/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3798/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3799/// when they refer multiple MIOperands inside a single one.
3800bool ARMAsmParser::
3801cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3802                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3803  // Create a writeback register dummy placeholder.
3804  Inst.addOperand(MCOperand::CreateImm(0));
3805  // Rt
3806  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3807  // addr
3808  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3809  // offset
3810  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3811  // pred
3812  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3813  return true;
3814}
3815
3816/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3817/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3818/// when they refer multiple MIOperands inside a single one.
3819bool ARMAsmParser::
3820cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3821                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3822  // Create a writeback register dummy placeholder.
3823  Inst.addOperand(MCOperand::CreateImm(0));
3824  // Rt
3825  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3826  // addr
3827  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3828  // offset
3829  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3830  // pred
3831  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3832  return true;
3833}
3834
3835/// cvtLdrdPre - Convert parsed operands to MCInst.
3836/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3837/// when they refer multiple MIOperands inside a single one.
3838bool ARMAsmParser::
3839cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3840           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3841  // Rt, Rt2
3842  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3843  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3844  // Create a writeback register dummy placeholder.
3845  Inst.addOperand(MCOperand::CreateImm(0));
3846  // addr
3847  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3848  // pred
3849  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3850  return true;
3851}
3852
3853/// cvtStrdPre - Convert parsed operands to MCInst.
3854/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3855/// when they refer multiple MIOperands inside a single one.
3856bool ARMAsmParser::
3857cvtStrdPre(MCInst &Inst, unsigned Opcode,
3858           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3859  // Create a writeback register dummy placeholder.
3860  Inst.addOperand(MCOperand::CreateImm(0));
3861  // Rt, Rt2
3862  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3863  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3864  // addr
3865  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3866  // pred
3867  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3868  return true;
3869}
3870
3871/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3872/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3873/// when they refer multiple MIOperands inside a single one.
3874bool ARMAsmParser::
3875cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3876                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3877  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3878  // Create a writeback register dummy placeholder.
3879  Inst.addOperand(MCOperand::CreateImm(0));
3880  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3881  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3882  return true;
3883}
3884
/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand. In the three-operand form (Operands.size() == 6, with Rd at
  // index 3 and the two sources at indices 4 and 5), reject the instruction
  // unless Rd matches one of the sources.
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    // Returning false tells the matcher that this conversion failed.
    return false;
  }
  // Rd.
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Optional cc_out operand (the 's' suffix).
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  // If we have a three-operand form, make sure to set Rn to be the operand
  // that isn't the same as Rd.
  unsigned RegOp = 4;
  if (Operands.size() == 6 &&
      ((ARMOperand*)Operands[4])->getReg() ==
        ((ARMOperand*)Operands[3])->getReg())
    RegOp = 5;
  // Rn.
  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
  // The second source is tied to Rd: duplicate the first MCInst operand
  // (Rd, added above) into that slot.
  Inst.addOperand(Inst.getOperand(0));
  // Predicate.
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}
3917
3918bool ARMAsmParser::
3919cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3920              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3921  // Vd
3922  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3923  // Create a writeback register dummy placeholder.
3924  Inst.addOperand(MCOperand::CreateImm(0));
3925  // Vn
3926  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3927  // pred
3928  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3929  return true;
3930}
3931
3932bool ARMAsmParser::
3933cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3934                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3935  // Vd
3936  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3937  // Create a writeback register dummy placeholder.
3938  Inst.addOperand(MCOperand::CreateImm(0));
3939  // Vn
3940  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3941  // Vm
3942  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3943  // pred
3944  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3945  return true;
3946}
3947
3948bool ARMAsmParser::
3949cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3950              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3951  // Create a writeback register dummy placeholder.
3952  Inst.addOperand(MCOperand::CreateImm(0));
3953  // Vn
3954  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3955  // Vt
3956  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3957  // pred
3958  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3959  return true;
3960}
3961
3962bool ARMAsmParser::
3963cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3964                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3965  // Create a writeback register dummy placeholder.
3966  Inst.addOperand(MCOperand::CreateImm(0));
3967  // Vn
3968  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3969  // Vm
3970  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3971  // Vt
3972  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3973  // pred
3974  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3975  return true;
3976}
3977
3978/// Parse an ARM memory expression, return false if successful else return true
3979/// or an error.  The first token must be a '[' when called.
3980bool ARMAsmParser::
3981parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3982  SMLoc S, E;
3983  assert(Parser.getTok().is(AsmToken::LBrac) &&
3984         "Token is not a Left Bracket");
3985  S = Parser.getTok().getLoc();
3986  Parser.Lex(); // Eat left bracket token.
3987
3988  const AsmToken &BaseRegTok = Parser.getTok();
3989  int BaseRegNum = tryParseRegister();
3990  if (BaseRegNum == -1)
3991    return Error(BaseRegTok.getLoc(), "register expected");
3992
3993  // The next token must either be a comma or a closing bracket.
3994  const AsmToken &Tok = Parser.getTok();
3995  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3996    return Error(Tok.getLoc(), "malformed memory operand");
3997
3998  if (Tok.is(AsmToken::RBrac)) {
3999    E = Tok.getLoc();
4000    Parser.Lex(); // Eat right bracket token.
4001
4002    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4003                                             0, 0, false, S, E));
4004
4005    // If there's a pre-indexing writeback marker, '!', just add it as a token
4006    // operand. It's rather odd, but syntactically valid.
4007    if (Parser.getTok().is(AsmToken::Exclaim)) {
4008      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4009      Parser.Lex(); // Eat the '!'.
4010    }
4011
4012    return false;
4013  }
4014
4015  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4016  Parser.Lex(); // Eat the comma.
4017
4018  // If we have a ':', it's an alignment specifier.
4019  if (Parser.getTok().is(AsmToken::Colon)) {
4020    Parser.Lex(); // Eat the ':'.
4021    E = Parser.getTok().getLoc();
4022
4023    const MCExpr *Expr;
4024    if (getParser().ParseExpression(Expr))
4025     return true;
4026
4027    // The expression has to be a constant. Memory references with relocations
4028    // don't come through here, as they use the <label> forms of the relevant
4029    // instructions.
4030    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4031    if (!CE)
4032      return Error (E, "constant expression expected");
4033
4034    unsigned Align = 0;
4035    switch (CE->getValue()) {
4036    default:
4037      return Error(E,
4038                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4039    case 16:  Align = 2; break;
4040    case 32:  Align = 4; break;
4041    case 64:  Align = 8; break;
4042    case 128: Align = 16; break;
4043    case 256: Align = 32; break;
4044    }
4045
4046    // Now we should have the closing ']'
4047    E = Parser.getTok().getLoc();
4048    if (Parser.getTok().isNot(AsmToken::RBrac))
4049      return Error(E, "']' expected");
4050    Parser.Lex(); // Eat right bracket token.
4051
4052    // Don't worry about range checking the value here. That's handled by
4053    // the is*() predicates.
4054    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4055                                             ARM_AM::no_shift, 0, Align,
4056                                             false, S, E));
4057
4058    // If there's a pre-indexing writeback marker, '!', just add it as a token
4059    // operand.
4060    if (Parser.getTok().is(AsmToken::Exclaim)) {
4061      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4062      Parser.Lex(); // Eat the '!'.
4063    }
4064
4065    return false;
4066  }
4067
4068  // If we have a '#', it's an immediate offset, else assume it's a register
4069  // offset. Be friendly and also accept a plain integer (without a leading
4070  // hash) for gas compatibility.
4071  if (Parser.getTok().is(AsmToken::Hash) ||
4072      Parser.getTok().is(AsmToken::Dollar) ||
4073      Parser.getTok().is(AsmToken::Integer)) {
4074    if (Parser.getTok().isNot(AsmToken::Integer))
4075      Parser.Lex(); // Eat the '#'.
4076    E = Parser.getTok().getLoc();
4077
4078    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4079    const MCExpr *Offset;
4080    if (getParser().ParseExpression(Offset))
4081     return true;
4082
4083    // The expression has to be a constant. Memory references with relocations
4084    // don't come through here, as they use the <label> forms of the relevant
4085    // instructions.
4086    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4087    if (!CE)
4088      return Error (E, "constant expression expected");
4089
4090    // If the constant was #-0, represent it as INT32_MIN.
4091    int32_t Val = CE->getValue();
4092    if (isNegative && Val == 0)
4093      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4094
4095    // Now we should have the closing ']'
4096    E = Parser.getTok().getLoc();
4097    if (Parser.getTok().isNot(AsmToken::RBrac))
4098      return Error(E, "']' expected");
4099    Parser.Lex(); // Eat right bracket token.
4100
4101    // Don't worry about range checking the value here. That's handled by
4102    // the is*() predicates.
4103    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4104                                             ARM_AM::no_shift, 0, 0,
4105                                             false, S, E));
4106
4107    // If there's a pre-indexing writeback marker, '!', just add it as a token
4108    // operand.
4109    if (Parser.getTok().is(AsmToken::Exclaim)) {
4110      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4111      Parser.Lex(); // Eat the '!'.
4112    }
4113
4114    return false;
4115  }
4116
4117  // The register offset is optionally preceded by a '+' or '-'
4118  bool isNegative = false;
4119  if (Parser.getTok().is(AsmToken::Minus)) {
4120    isNegative = true;
4121    Parser.Lex(); // Eat the '-'.
4122  } else if (Parser.getTok().is(AsmToken::Plus)) {
4123    // Nothing to do.
4124    Parser.Lex(); // Eat the '+'.
4125  }
4126
4127  E = Parser.getTok().getLoc();
4128  int OffsetRegNum = tryParseRegister();
4129  if (OffsetRegNum == -1)
4130    return Error(E, "register expected");
4131
4132  // If there's a shift operator, handle it.
4133  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4134  unsigned ShiftImm = 0;
4135  if (Parser.getTok().is(AsmToken::Comma)) {
4136    Parser.Lex(); // Eat the ','.
4137    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4138      return true;
4139  }
4140
4141  // Now we should have the closing ']'
4142  E = Parser.getTok().getLoc();
4143  if (Parser.getTok().isNot(AsmToken::RBrac))
4144    return Error(E, "']' expected");
4145  Parser.Lex(); // Eat right bracket token.
4146
4147  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4148                                           ShiftType, ShiftImm, 0, isNegative,
4149                                           S, E));
4150
4151  // If there's a pre-indexing writeback marker, '!', just add it as a token
4152  // operand.
4153  if (Parser.getTok().is(AsmToken::Exclaim)) {
4154    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4155    Parser.Lex(); // Eat the '!'.
4156  }
4157
4158  return false;
4159}
4160
4161/// parseMemRegOffsetShift - one of these two:
4162///   ( lsl | lsr | asr | ror ) , # shift_amount
4163///   rrx
4164/// return true if it parses a shift otherwise it returns false.
4165bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4166                                          unsigned &Amount) {
4167  SMLoc Loc = Parser.getTok().getLoc();
4168  const AsmToken &Tok = Parser.getTok();
4169  if (Tok.isNot(AsmToken::Identifier))
4170    return true;
4171  StringRef ShiftName = Tok.getString();
4172  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4173      ShiftName == "asl" || ShiftName == "ASL")
4174    St = ARM_AM::lsl;
4175  else if (ShiftName == "lsr" || ShiftName == "LSR")
4176    St = ARM_AM::lsr;
4177  else if (ShiftName == "asr" || ShiftName == "ASR")
4178    St = ARM_AM::asr;
4179  else if (ShiftName == "ror" || ShiftName == "ROR")
4180    St = ARM_AM::ror;
4181  else if (ShiftName == "rrx" || ShiftName == "RRX")
4182    St = ARM_AM::rrx;
4183  else
4184    return Error(Loc, "illegal shift operator");
4185  Parser.Lex(); // Eat shift type token.
4186
4187  // rrx stands alone.
4188  Amount = 0;
4189  if (St != ARM_AM::rrx) {
4190    Loc = Parser.getTok().getLoc();
4191    // A '#' and a shift amount.
4192    const AsmToken &HashTok = Parser.getTok();
4193    if (HashTok.isNot(AsmToken::Hash) &&
4194        HashTok.isNot(AsmToken::Dollar))
4195      return Error(HashTok.getLoc(), "'#' expected");
4196    Parser.Lex(); // Eat hash token.
4197
4198    const MCExpr *Expr;
4199    if (getParser().ParseExpression(Expr))
4200      return true;
4201    // Range check the immediate.
4202    // lsl, ror: 0 <= imm <= 31
4203    // lsr, asr: 0 <= imm <= 32
4204    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4205    if (!CE)
4206      return Error(Loc, "shift amount must be an immediate");
4207    int64_t Imm = CE->getValue();
4208    if (Imm < 0 ||
4209        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4210        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4211      return Error(Loc, "immediate shift value out of range");
4212    Amount = Imm;
4213  }
4214
4215  return false;
4216}
4217
/// parseFPImm - Parse a floating point immediate operand (e.g. the '#0.5'
/// in "vmov.f32 s0, #0.5"). Returns MatchOperand_NoMatch when the tokens
/// cannot be an FP immediate for a form that takes one, so the generic
/// operand parsing can try other interpretations.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  // An FP immediate is always introduced by '#' (or '$' for gas
  // compatibility).
  if (Parser.getTok().isNot(AsmToken::Hash) &&
      Parser.getTok().isNot(AsmToken::Dollar))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  //
  // Operands[2] holds the data-type suffix token; only the .f32/.f64
  // forms take a floating point immediate.
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    // A literal FP value: convert it to the 8-bit VFP immediate encoding.
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm returns -1 when the value has no 8-bit immediate encoding.
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      TokError("floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    // An integer is taken as the raw (already encoded) 8-bit immediate.
    // NOTE(review): a leading '-' consumed above is silently ignored for
    // this form — confirm whether '#-N' should be rejected here.
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
/// Parse an ARM instruction operand. For now this parses the operand
/// regardless of the mnemonic. Returns true on error (with a diagnostic
/// already emitted), false on success.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // First try it as a register (possibly with a writeback '!').
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    // If this is VMRS, check for the apsr_nzcv operand.
    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
      return false;
    }

    // Fall through (intentionally, no break) for the Identifier case that
    // is not a register or a special name: treat it as an expression.
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::String:  // quoted label names.
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    // End location is the last character of the expression just consumed.
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Dollar:
  case AsmToken::Hash: {
    // #42 -> immediate.
    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
    S = Parser.getTok().getLoc();
    Parser.Lex();
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (CE) {
      int32_t Val = CE->getValue();
      // Represent '#-0' as INT32_MIN so later code can distinguish it
      // from a plain '#0'.
      if (isNegative && Val == 0)
        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
    }
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
    return false;
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
    ARMMCExpr::VariantKind RefKind;
    if (parsePrefix(RefKind))
      return true;

    const MCExpr *SubExprVal;
    if (getParser().ParseExpression(SubExprVal))
      return true;

    // Wrap the subexpression in an ARM-specific relocation modifier.
    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
                                                   getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
    return false;
  }
  }
}
4377
4378// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4379//  :lower16: and :upper16:.
4380bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4381  RefKind = ARMMCExpr::VK_ARM_None;
4382
4383  // :lower16: and :upper16: modifiers
4384  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4385  Parser.Lex(); // Eat ':'
4386
4387  if (getLexer().isNot(AsmToken::Identifier)) {
4388    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4389    return true;
4390  }
4391
4392  StringRef IDVal = Parser.getTok().getIdentifier();
4393  if (IDVal == "lower16") {
4394    RefKind = ARMMCExpr::VK_ARM_LO16;
4395  } else if (IDVal == "upper16") {
4396    RefKind = ARMMCExpr::VK_ARM_HI16;
4397  } else {
4398    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4399    return true;
4400  }
4401  Parser.Lex();
4402
4403  if (getLexer().isNot(AsmToken::Colon)) {
4404    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4405    return true;
4406  }
4407  Parser.Lex(); // Eat the last ':'
4408  return false;
4409}
4410
/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// Returns the canonical (stripped) mnemonic and fills in:
///   PredicationCode - the ARMCC condition code (ARMCC::AL if none).
///   CarrySetting    - true if a flag-setting 's' suffix was present.
///   ProcessorIMod   - the "cps" interrupt-mode code (ie/id), 0 if none.
///   ITMask          - for "it" instructions, the trailing t/e mask letters.
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
      Mnemonic == "fmuls")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // (StringRef::substr clamps an out-of-range start index, so very short
  // mnemonics are handled safely here — relies on that API behavior.)
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
        Mnemonic == "fmuls" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}
4509
4510/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4511/// inclusion of carry set or predication code operands.
4512//
4513// FIXME: It would be nice to autogen this.
4514void ARMAsmParser::
4515getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4516                      bool &CanAcceptPredicationCode) {
4517  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4518      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4519      Mnemonic == "add" || Mnemonic == "adc" ||
4520      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4521      Mnemonic == "orr" || Mnemonic == "mvn" ||
4522      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4523      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4524      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4525                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4526                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4527    CanAcceptCarrySet = true;
4528  } else
4529    CanAcceptCarrySet = false;
4530
4531  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4532      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4533      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4534      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4535      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4536      (Mnemonic == "clrex" && !isThumb()) ||
4537      (Mnemonic == "nop" && isThumbOne()) ||
4538      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4539        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4540        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4541      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4542       !isThumb()) ||
4543      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4544    CanAcceptPredicationCode = false;
4545  } else
4546    CanAcceptPredicationCode = true;
4547
4548  if (isThumb()) {
4549    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4550        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4551      CanAcceptPredicationCode = false;
4552  }
4553}
4554
/// shouldOmitCCOutOperand - Return true when the default (non-setting)
/// cc_out operand at Operands[1] should be removed before instruction
/// matching, because the specific encoding being targeted has no cc_out
/// operand at all. Operands[0] is the mnemonic token and Operands[1] the
/// cc_out register (0 when flags are not set), as built by ParseInstruction.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;

  // Also check the 'mul' syntax variant that doesn't specify an explicit
  // destination register.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      // If the registers aren't low regs  or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock()))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // No special case applied; keep the cc_out operand.
  return false;
}
4679
4680static bool isDataTypeToken(StringRef Tok) {
4681  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4682    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4683    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4684    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4685    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4686    Tok == ".f" || Tok == ".d";
4687}
4688
4689// FIXME: This bit should probably be handled via an explicit match class
4690// in the .td files that matches the suffix instead of having it be
4691// a literal string token the way it is now.
4692static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4693  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4694}
4695
4696static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4697/// Parse an arm instruction mnemonic followed by its operands.
4698bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4699                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4700  // Apply mnemonic aliases before doing anything else, as the destination
4701  // mnemnonic may include suffices and we want to handle them normally.
4702  // The generic tblgen'erated code does this later, at the start of
4703  // MatchInstructionImpl(), but that's too late for aliases that include
4704  // any sort of suffix.
4705  unsigned AvailableFeatures = getAvailableFeatures();
4706  applyMnemonicAliases(Name, AvailableFeatures);
4707
4708  // First check for the ARM-specific .req directive.
4709  if (Parser.getTok().is(AsmToken::Identifier) &&
4710      Parser.getTok().getIdentifier() == ".req") {
4711    parseDirectiveReq(Name, NameLoc);
4712    // We always return 'error' for this, as we're done with this
4713    // statement and don't need to match the 'instruction."
4714    return true;
4715  }
4716
4717  // Create the leading tokens for the mnemonic, split by '.' characters.
4718  size_t Start = 0, Next = Name.find('.');
4719  StringRef Mnemonic = Name.slice(Start, Next);
4720
4721  // Split out the predication code and carry setting flag from the mnemonic.
4722  unsigned PredicationCode;
4723  unsigned ProcessorIMod;
4724  bool CarrySetting;
4725  StringRef ITMask;
4726  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4727                           ProcessorIMod, ITMask);
4728
4729  // In Thumb1, only the branch (B) instruction can be predicated.
4730  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4731    Parser.EatToEndOfStatement();
4732    return Error(NameLoc, "conditional execution not supported in Thumb1");
4733  }
4734
4735  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4736
4737  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4738  // is the mask as it will be for the IT encoding if the conditional
4739  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
4740  // where the conditional bit0 is zero, the instruction post-processing
4741  // will adjust the mask accordingly.
4742  if (Mnemonic == "it") {
4743    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4744    if (ITMask.size() > 3) {
4745      Parser.EatToEndOfStatement();
4746      return Error(Loc, "too many conditions on IT instruction");
4747    }
4748    unsigned Mask = 8;
4749    for (unsigned i = ITMask.size(); i != 0; --i) {
4750      char pos = ITMask[i - 1];
4751      if (pos != 't' && pos != 'e') {
4752        Parser.EatToEndOfStatement();
4753        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4754      }
4755      Mask >>= 1;
4756      if (ITMask[i - 1] == 't')
4757        Mask |= 8;
4758    }
4759    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4760  }
4761
4762  // FIXME: This is all a pretty gross hack. We should automatically handle
4763  // optional operands like this via tblgen.
4764
4765  // Next, add the CCOut and ConditionCode operands, if needed.
4766  //
4767  // For mnemonics which can ever incorporate a carry setting bit or predication
4768  // code, our matching model involves us always generating CCOut and
4769  // ConditionCode operands to match the mnemonic "as written" and then we let
4770  // the matcher deal with finding the right instruction or generating an
4771  // appropriate error.
4772  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4773  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4774
4775  // If we had a carry-set on an instruction that can't do that, issue an
4776  // error.
4777  if (!CanAcceptCarrySet && CarrySetting) {
4778    Parser.EatToEndOfStatement();
4779    return Error(NameLoc, "instruction '" + Mnemonic +
4780                 "' can not set flags, but 's' suffix specified");
4781  }
4782  // If we had a predication code on an instruction that can't do that, issue an
4783  // error.
4784  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4785    Parser.EatToEndOfStatement();
4786    return Error(NameLoc, "instruction '" + Mnemonic +
4787                 "' is not predicable, but condition code specified");
4788  }
4789
4790  // Add the carry setting operand, if necessary.
4791  if (CanAcceptCarrySet) {
4792    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4793    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4794                                               Loc));
4795  }
4796
4797  // Add the predication code operand, if necessary.
4798  if (CanAcceptPredicationCode) {
4799    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4800                                      CarrySetting);
4801    Operands.push_back(ARMOperand::CreateCondCode(
4802                         ARMCC::CondCodes(PredicationCode), Loc));
4803  }
4804
4805  // Add the processor imod operand, if necessary.
4806  if (ProcessorIMod) {
4807    Operands.push_back(ARMOperand::CreateImm(
4808          MCConstantExpr::Create(ProcessorIMod, getContext()),
4809                                 NameLoc, NameLoc));
4810  }
4811
4812  // Add the remaining tokens in the mnemonic.
4813  while (Next != StringRef::npos) {
4814    Start = Next;
4815    Next = Name.find('.', Start + 1);
4816    StringRef ExtraToken = Name.slice(Start, Next);
4817
4818    // Some NEON instructions have an optional datatype suffix that is
4819    // completely ignored. Check for that.
4820    if (isDataTypeToken(ExtraToken) &&
4821        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4822      continue;
4823
4824    if (ExtraToken != ".n") {
4825      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4826      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4827    }
4828  }
4829
4830  // Read the remaining operands.
4831  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4832    // Read the first operand.
4833    if (parseOperand(Operands, Mnemonic)) {
4834      Parser.EatToEndOfStatement();
4835      return true;
4836    }
4837
4838    while (getLexer().is(AsmToken::Comma)) {
4839      Parser.Lex();  // Eat the comma.
4840
4841      // Parse and remember the operand.
4842      if (parseOperand(Operands, Mnemonic)) {
4843        Parser.EatToEndOfStatement();
4844        return true;
4845      }
4846    }
4847  }
4848
4849  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4850    SMLoc Loc = getLexer().getLoc();
4851    Parser.EatToEndOfStatement();
4852    return Error(Loc, "unexpected token in argument list");
4853  }
4854
4855  Parser.Lex(); // Consume the EndOfStatement
4856
4857  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4858  // do and don't have a cc_out optional-def operand. With some spot-checks
4859  // of the operand list, we can figure out which variant we're trying to
4860  // parse and adjust accordingly before actually matching. We shouldn't ever
4861  // try to remove a cc_out operand that was explicitly set on the the
4862  // mnemonic, of course (CarrySetting == true). Reason number #317 the
4863  // table driven matcher doesn't fit well with the ARM instruction set.
4864  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4865    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4866    Operands.erase(Operands.begin() + 1);
4867    delete Op;
4868  }
4869
4870  // ARM mode 'blx' need special handling, as the register operand version
4871  // is predicable, but the label operand version is not. So, we can't rely
4872  // on the Mnemonic based checking to correctly figure out when to put
4873  // a k_CondCode operand in the list. If we're trying to match the label
4874  // version, remove the k_CondCode operand here.
4875  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4876      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4877    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4878    Operands.erase(Operands.begin() + 1);
4879    delete Op;
4880  }
4881
4882  // The vector-compare-to-zero instructions have a literal token "#0" at
4883  // the end that comes to here as an immediate operand. Convert it to a
4884  // token to play nicely with the matcher.
4885  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4886      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4887      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4888    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4889    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4890    if (CE && CE->getValue() == 0) {
4891      Operands.erase(Operands.begin() + 5);
4892      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4893      delete Op;
4894    }
4895  }
4896  // VCMP{E} does the same thing, but with a different operand count.
4897  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4898      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4899    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4900    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4901    if (CE && CE->getValue() == 0) {
4902      Operands.erase(Operands.begin() + 4);
4903      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4904      delete Op;
4905    }
4906  }
4907  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4908  // end. Convert it to a token here. Take care not to convert those
4909  // that should hit the Thumb2 encoding.
4910  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4911      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4912      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4913      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4914    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4915    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4916    if (CE && CE->getValue() == 0 &&
4917        (isThumbOne() ||
4918         // The cc_out operand matches the IT block.
4919         ((inITBlock() != CarrySetting) &&
4920         // Neither register operand is a high register.
4921         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4922          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4923      Operands.erase(Operands.begin() + 5);
4924      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4925      delete Op;
4926    }
4927  }
4928
4929  return false;
4930}
4931
4932// Validate context-sensitive operand constraints.
4933
4934// return 'true' if register list contains non-low GPR registers,
4935// 'false' otherwise. If Reg is in the register list or is HiReg, set
4936// 'containsReg' to true.
4937static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4938                                 unsigned HiReg, bool &containsReg) {
4939  containsReg = false;
4940  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4941    unsigned OpReg = Inst.getOperand(i).getReg();
4942    if (OpReg == Reg)
4943      containsReg = true;
4944    // Anything other than a low register isn't legal here.
4945    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4946      return true;
4947  }
4948  return false;
4949}
4950
4951// Check if the specified regisgter is in the register list of the inst,
4952// starting at the indicated operand number.
4953static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4954  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4955    unsigned OpReg = Inst.getOperand(i).getReg();
4956    if (OpReg == Reg)
4957      return true;
4958  }
4959  return false;
4960}
4961
// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
namespace llvm {
// Per-opcode instruction description table, defined elsewhere in the
// target; declared here so it can be indexed directly by opcode.
extern const MCInstrDesc ARMInsts[];
}
// Look up the MCInstrDesc for the given opcode via the raw ARMInsts table.
static const MCInstrDesc &getInstDesc(unsigned Opcode) {
  return ARMInsts[Opcode];
}
4971
// FIXME: We would really like to be able to tablegen'erate this.
// Enforce context-sensitive operand constraints that the table-driven
// matcher cannot express: IT-block predication rules, register-pair
// sequencing, bitfield ranges, and Thumb register-list restrictions.
// Returns true (after emitting a diagnostic via Error()) if Inst is
// invalid; returns false if it passes all checks.
bool ARMAsmParser::
validateInstruction(MCInst &Inst,
                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
  // Operands[0] is the mnemonic token; use its location for diagnostics
  // not tied to a specific operand.
  SMLoc Loc = Operands[0]->getStartLoc();
  // Check the IT block state first.
  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
  // being allowed in IT blocks, but not being predicable.  It just always
  // executes.
  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
    // The first instruction in the block uses the IT condition directly;
    // subsequent slots read their then/else bit from the mask.
    unsigned bit = 1;
    if (ITState.FirstCond)
      ITState.FirstCond = false;
    else
      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
    // The instruction must be predicable.
    if (!MCID.isPredicable())
      return Error(Loc, "instructions in IT block must be predicable");
    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
    // A '0' bit selects the inverse ('else') condition for this slot.
    unsigned ITCond = bit ? ITState.Cond :
      ARMCC::getOppositeCondition(ITState.Cond);
    if (Cond != ITCond) {
      // Find the condition code Operand to get its SMLoc information.
      SMLoc CondLoc;
      for (unsigned i = 1; i < Operands.size(); ++i)
        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
          CondLoc = Operands[i]->getStartLoc();
      return Error(CondLoc, "incorrect condition in IT block; got '" +
                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
                   "', but expected '" +
                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
    }
  // Check for non-'al' condition codes outside of the IT block.
  } else if (isThumbTwo() && MCID.isPredicable() &&
             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
             Inst.getOpcode() != ARM::t2B)
    return Error(Loc, "predicated instructions must be in IT block");

  // Per-opcode operand constraints.
  switch (Inst.getOpcode()) {
  case ARM::LDRD:
  case ARM::LDRD_PRE:
  case ARM::LDRD_POST:
  case ARM::LDREXD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "destination operands must be sequential");
    return false;
  }
  case ARM::STRD: {
    // Rt2 must be Rt + 1.
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::STRD_PRE:
  case ARM::STRD_POST:
  case ARM::STREXD: {
    // Rt2 must be Rt + 1. (These forms carry Rt at operand 1, not 0.)
    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
    if (Rt2 != Rt + 1)
      return Error(Operands[3]->getStartLoc(),
                   "source operands must be sequential");
    return false;
  }
  case ARM::SBFX:
  case ARM::UBFX: {
    // width must be in range [1, 32-lsb]
    unsigned lsb = Inst.getOperand(2).getImm();
    unsigned widthm1 = Inst.getOperand(3).getImm();
    if (widthm1 >= 32 - lsb)
      return Error(Operands[5]->getStartLoc(),
                   "bitfield width must be in range [1,32-lsb]");
    return false;
  }
  case ARM::tLDMIA: {
    // If we're parsing Thumb2, the .w variant is available and handles
    // most cases that are normally illegal for a Thumb1 LDM
    // instruction. We'll make the transformation in processInstruction()
    // if necessary.
    //
    // Thumb LDM instructions are writeback iff the base register is not
    // in the register list.
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
                   "registers must be in range r0-r7");
    // If we should have writeback, then there should be a '!' token.
    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "writeback operator '!' expected");
    // If we should not have writeback, there must not be a '!'. This is
    // true even for the 32-bit wide encodings.
    if (listContainsBase && hasWritebackToken)
      return Error(Operands[3]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");

    break;
  }
  case ARM::t2LDMIA_UPD: {
    // The base register of a writeback LDM may not appear in the list.
    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
      return Error(Operands[4]->getStartLoc(),
                   "writeback operator '!' not allowed when base register "
                   "in register list");
    break;
  }
  // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2,
  // so only issue a diagnostic for thumb1. The instructions will be
  // switched to the t2 encodings in processInstruction() if necessary.
  case ARM::tPOP: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or pc");
    break;
  }
  case ARM::tPUSH: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
        !isThumbTwo())
      return Error(Operands[2]->getStartLoc(),
                   "registers must be in range r0-r7 or lr");
    break;
  }
  case ARM::tSTMIA_UPD: {
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
      return Error(Operands[4]->getStartLoc(),
                   "registers must be in range r0-r7");
    break;
  }
  }

  return false;
}
5121
// Map a VST1LN/VST2LN assembly pseudo-opcode ("...Asm...") to the real
// instruction opcode, and report the register-list spacing through
// Spacing: 1 for the D-register (consecutive) forms, 2 for the
// Q-register (every-other-register) forms.
static unsigned getRealVSTLNOpcode(unsigned Opc, unsigned &Spacing) {
  switch(Opc) {
  default: assert(0 && "unexpected opcode!");
  // VST1LN
  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
  case ARM::VST1LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
  case ARM::VST1LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
  case ARM::VST1LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST1LNd8_UPD;
  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
  case ARM::VST1LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST1LNd16_UPD;
  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST1LNd32_UPD;
  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
  case ARM::VST1LNdAsm_U8:
    Spacing = 1;
    return ARM::VST1LNd8;
  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
  case ARM::VST1LNdAsm_U16:
    Spacing = 1;
    return ARM::VST1LNd16;
  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
    Spacing = 1;
    return ARM::VST1LNd32;

  // VST2LN
  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
  case ARM::VST2LNdWB_fixed_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
  case ARM::VST2LNdWB_fixed_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
  case ARM::VST2LNqWB_fixed_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_fixed_Asm_32:  case ARM::VST2LNqWB_fixed_Asm_F:
  case ARM::VST2LNqWB_fixed_Asm_F32: case ARM::VST2LNqWB_fixed_Asm_I32:
  case ARM::VST2LNqWB_fixed_Asm_S32: case ARM::VST2LNqWB_fixed_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
  case ARM::VST2LNdWB_register_Asm_U8:
    Spacing = 1;
    return ARM::VST2LNd8_UPD;
  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
  case ARM::VST2LNdWB_register_Asm_U16:
    Spacing = 1;
    return ARM::VST2LNd16_UPD;
  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
    Spacing = 1;
    return ARM::VST2LNd32_UPD;
  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
  case ARM::VST2LNqWB_register_Asm_U16:
    Spacing = 2;
    return ARM::VST2LNq16_UPD;
  case ARM::VST2LNqWB_register_Asm_32:  case ARM::VST2LNqWB_register_Asm_F:
  case ARM::VST2LNqWB_register_Asm_F32: case ARM::VST2LNqWB_register_Asm_I32:
  case ARM::VST2LNqWB_register_Asm_S32: case ARM::VST2LNqWB_register_Asm_U32:
    Spacing = 2;
    return ARM::VST2LNq32_UPD;

  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
  case ARM::VST2LNdAsm_U8:
    Spacing = 1;
    return ARM::VST2LNd8;
  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
  case ARM::VST2LNdAsm_U16:
    Spacing = 1;
    return ARM::VST2LNd16;
  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
    Spacing = 1;
    return ARM::VST2LNd32;
  case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16:
  case ARM::VST2LNqAsm_U16:
    Spacing = 2;
    return ARM::VST2LNq16;
  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:
  case ARM::VST2LNqAsm_F32: case ARM::VST2LNqAsm_I32:
  case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:
    Spacing = 2;
    return ARM::VST2LNq32;
  }
}
5252
5253static unsigned getRealVLDLNOpcode(unsigned Opc, unsigned &Spacing) {
5254  switch(Opc) {
5255  default: assert(0 && "unexpected opcode!");
5256  // VLD1LN
5257  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5258  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5259  case ARM::VLD1LNdWB_fixed_Asm_U8:
5260    Spacing = 1;
5261    return ARM::VLD1LNd8_UPD;
5262  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5263  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5264  case ARM::VLD1LNdWB_fixed_Asm_U16:
5265    Spacing = 1;
5266    return ARM::VLD1LNd16_UPD;
5267  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5268  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5269  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5270    Spacing = 1;
5271    return ARM::VLD1LNd32_UPD;
5272  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5273  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5274  case ARM::VLD1LNdWB_register_Asm_U8:
5275    Spacing = 1;
5276    return ARM::VLD1LNd8_UPD;
5277  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5278  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5279  case ARM::VLD1LNdWB_register_Asm_U16:
5280    Spacing = 1;
5281    return ARM::VLD1LNd16_UPD;
5282  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5283  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5284  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5285    Spacing = 1;
5286    return ARM::VLD1LNd32_UPD;
5287  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5288  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5289  case ARM::VLD1LNdAsm_U8:
5290    Spacing = 1;
5291    return ARM::VLD1LNd8;
5292  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5293  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5294  case ARM::VLD1LNdAsm_U16:
5295    Spacing = 1;
5296    return ARM::VLD1LNd16;
5297  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5298  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5299  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5300    Spacing = 1;
5301    return ARM::VLD1LNd32;
5302
5303  // VLD2LN
5304  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5305  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5306  case ARM::VLD2LNdWB_fixed_Asm_U8:
5307    Spacing = 1;
5308    return ARM::VLD2LNd8_UPD;
5309  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5310  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5311  case ARM::VLD2LNdWB_fixed_Asm_U16:
5312    Spacing = 1;
5313    return ARM::VLD2LNd16_UPD;
5314  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5315  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5316  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5317    Spacing = 1;
5318    return ARM::VLD2LNd32_UPD;
5319  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5320  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5321  case ARM::VLD2LNqWB_fixed_Asm_U16:
5322    Spacing = 1;
5323    return ARM::VLD2LNq16_UPD;
5324  case ARM::VLD2LNqWB_fixed_Asm_32:  case ARM::VLD2LNqWB_fixed_Asm_F:
5325  case ARM::VLD2LNqWB_fixed_Asm_F32: case ARM::VLD2LNqWB_fixed_Asm_I32:
5326  case ARM::VLD2LNqWB_fixed_Asm_S32: case ARM::VLD2LNqWB_fixed_Asm_U32:
5327    Spacing = 2;
5328    return ARM::VLD2LNq32_UPD;
5329  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5330  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5331  case ARM::VLD2LNdWB_register_Asm_U8:
5332    Spacing = 1;
5333    return ARM::VLD2LNd8_UPD;
5334  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5335  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5336  case ARM::VLD2LNdWB_register_Asm_U16:
5337    Spacing = 1;
5338    return ARM::VLD2LNd16_UPD;
5339  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5340  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5341  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5342    Spacing = 1;
5343    return ARM::VLD2LNd32_UPD;
5344  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5345  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5346  case ARM::VLD2LNqWB_register_Asm_U16:
5347    Spacing = 2;
5348    return ARM::VLD2LNq16_UPD;
5349  case ARM::VLD2LNqWB_register_Asm_32:  case ARM::VLD2LNqWB_register_Asm_F:
5350  case ARM::VLD2LNqWB_register_Asm_F32: case ARM::VLD2LNqWB_register_Asm_I32:
5351  case ARM::VLD2LNqWB_register_Asm_S32: case ARM::VLD2LNqWB_register_Asm_U32:
5352    Spacing = 2;
5353    return ARM::VLD2LNq32_UPD;
5354  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5355  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5356  case ARM::VLD2LNdAsm_U8:
5357    Spacing = 1;
5358    return ARM::VLD2LNd8;
5359  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5360  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5361  case ARM::VLD2LNdAsm_U16:
5362    Spacing = 1;
5363    return ARM::VLD2LNd16;
5364  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5365  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5366  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5367    Spacing = 1;
5368    return ARM::VLD2LNd32;
5369  case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5370  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16:
5371  case ARM::VLD2LNqAsm_U16:
5372    Spacing = 2;
5373    return ARM::VLD2LNq16;
5374  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:
5375  case ARM::VLD2LNqAsm_F32: case ARM::VLD2LNqAsm_I32:
5376  case ARM::VLD2LNqAsm_S32: case ARM::VLD2LNqAsm_U32:
5377    Spacing = 2;
5378    return ARM::VLD2LNq32;
5379  }
5380}
5381
5382bool ARMAsmParser::
5383processInstruction(MCInst &Inst,
5384                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5385  switch (Inst.getOpcode()) {
5386  // Handle NEON VST complex aliases.
5387  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5388  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5389  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5390  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5391  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5392  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5393  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5394  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5395    MCInst TmpInst;
5396    // Shuffle the operands around so the lane index operand is in the
5397    // right place.
5398    unsigned Spacing;
5399    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5400    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5401    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5402    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5403    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5404    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5405    TmpInst.addOperand(Inst.getOperand(1)); // lane
5406    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5407    TmpInst.addOperand(Inst.getOperand(6));
5408    Inst = TmpInst;
5409    return true;
5410  }
5411
5412  case ARM::VST2LNdWB_register_Asm_8:   case ARM::VST2LNdWB_register_Asm_P8:
5413  case ARM::VST2LNdWB_register_Asm_I8:  case ARM::VST2LNdWB_register_Asm_S8:
5414  case ARM::VST2LNdWB_register_Asm_U8:  case ARM::VST2LNdWB_register_Asm_16:
5415  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5416  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5417  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5418  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5419  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5420  case ARM::VST2LNqWB_register_Asm_16:  case ARM::VST2LNqWB_register_Asm_P16:
5421  case ARM::VST2LNqWB_register_Asm_I16: case ARM::VST2LNqWB_register_Asm_S16:
5422  case ARM::VST2LNqWB_register_Asm_U16: case ARM::VST2LNqWB_register_Asm_32:
5423  case ARM::VST2LNqWB_register_Asm_F:   case ARM::VST2LNqWB_register_Asm_F32:
5424  case ARM::VST2LNqWB_register_Asm_I32: case ARM::VST2LNqWB_register_Asm_S32:
5425  case ARM::VST2LNqWB_register_Asm_U32: {
5426    MCInst TmpInst;
5427    // Shuffle the operands around so the lane index operand is in the
5428    // right place.
5429    unsigned Spacing;
5430    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5431    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5432    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5433    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5434    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5435    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5436    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5437                                            Spacing));
5438    TmpInst.addOperand(Inst.getOperand(1)); // lane
5439    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5440    TmpInst.addOperand(Inst.getOperand(6));
5441    Inst = TmpInst;
5442    return true;
5443  }
5444  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5445  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5446  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5447  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5448  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5449  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5450  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5451  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5452    MCInst TmpInst;
5453    // Shuffle the operands around so the lane index operand is in the
5454    // right place.
5455    unsigned Spacing;
5456    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5457    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5458    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5459    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5460    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5461    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5462    TmpInst.addOperand(Inst.getOperand(1)); // lane
5463    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5464    TmpInst.addOperand(Inst.getOperand(5));
5465    Inst = TmpInst;
5466    return true;
5467  }
5468
5469  case ARM::VST2LNdWB_fixed_Asm_8:   case ARM::VST2LNdWB_fixed_Asm_P8:
5470  case ARM::VST2LNdWB_fixed_Asm_I8:  case ARM::VST2LNdWB_fixed_Asm_S8:
5471  case ARM::VST2LNdWB_fixed_Asm_U8:  case ARM::VST2LNdWB_fixed_Asm_16:
5472  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5473  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5474  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5475  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5476  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5477  case ARM::VST2LNqWB_fixed_Asm_16:  case ARM::VST2LNqWB_fixed_Asm_P16:
5478  case ARM::VST2LNqWB_fixed_Asm_I16: case ARM::VST2LNqWB_fixed_Asm_S16:
5479  case ARM::VST2LNqWB_fixed_Asm_U16: case ARM::VST2LNqWB_fixed_Asm_32:
5480  case ARM::VST2LNqWB_fixed_Asm_F:   case ARM::VST2LNqWB_fixed_Asm_F32:
5481  case ARM::VST2LNqWB_fixed_Asm_I32: case ARM::VST2LNqWB_fixed_Asm_S32:
5482  case ARM::VST2LNqWB_fixed_Asm_U32: {
5483    MCInst TmpInst;
5484    // Shuffle the operands around so the lane index operand is in the
5485    // right place.
5486    unsigned Spacing;
5487    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5488    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5489    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5490    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5491    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5492    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5493    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5494                                            Spacing));
5495    TmpInst.addOperand(Inst.getOperand(1)); // lane
5496    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5497    TmpInst.addOperand(Inst.getOperand(5));
5498    Inst = TmpInst;
5499    return true;
5500  }
5501  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5502  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5503  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5504  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5505  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5506  case ARM::VST1LNdAsm_U32: {
5507    MCInst TmpInst;
5508    // Shuffle the operands around so the lane index operand is in the
5509    // right place.
5510    unsigned Spacing;
5511    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5512    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5513    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5514    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5515    TmpInst.addOperand(Inst.getOperand(1)); // lane
5516    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5517    TmpInst.addOperand(Inst.getOperand(5));
5518    Inst = TmpInst;
5519    return true;
5520  }
5521
5522  case ARM::VST2LNdAsm_8:   case ARM::VST2LNdAsm_P8:  case ARM::VST2LNdAsm_I8:
5523  case ARM::VST2LNdAsm_S8:  case ARM::VST2LNdAsm_U8:  case ARM::VST2LNdAsm_16:
5524  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5525  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5526  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5527  case ARM::VST2LNdAsm_U32: case ARM::VST2LNqAsm_16:  case ARM::VST2LNqAsm_P16:
5528  case ARM::VST2LNqAsm_I16: case ARM::VST2LNqAsm_S16: case ARM::VST2LNqAsm_U16:
5529  case ARM::VST2LNqAsm_32:  case ARM::VST2LNqAsm_F:   case ARM::VST2LNqAsm_F32:
5530  case ARM::VST2LNqAsm_I32: case ARM::VST2LNqAsm_S32: case ARM::VST2LNqAsm_U32:{
5531    MCInst TmpInst;
5532    // Shuffle the operands around so the lane index operand is in the
5533    // right place.
5534    unsigned Spacing;
5535    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode(), Spacing));
5536    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5537    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5538    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5539    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5540                                            Spacing));
5541    TmpInst.addOperand(Inst.getOperand(1)); // lane
5542    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5543    TmpInst.addOperand(Inst.getOperand(5));
5544    Inst = TmpInst;
5545    return true;
5546  }
5547  // Handle NEON VLD complex aliases.
5548  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5549  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5550  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5551  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5552  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5553  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5554  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5555  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5556    MCInst TmpInst;
5557    // Shuffle the operands around so the lane index operand is in the
5558    // right place.
5559    unsigned Spacing;
5560    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5561    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5562    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5563    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5564    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5565    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5566    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5567    TmpInst.addOperand(Inst.getOperand(1)); // lane
5568    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5569    TmpInst.addOperand(Inst.getOperand(6));
5570    Inst = TmpInst;
5571    return true;
5572  }
5573
5574  case ARM::VLD2LNdWB_register_Asm_8:   case ARM::VLD2LNdWB_register_Asm_P8:
5575  case ARM::VLD2LNdWB_register_Asm_I8:  case ARM::VLD2LNdWB_register_Asm_S8:
5576  case ARM::VLD2LNdWB_register_Asm_U8:  case ARM::VLD2LNdWB_register_Asm_16:
5577  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5578  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5579  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5580  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5581  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5582  case ARM::VLD2LNqWB_register_Asm_16:  case ARM::VLD2LNqWB_register_Asm_P16:
5583  case ARM::VLD2LNqWB_register_Asm_I16: case ARM::VLD2LNqWB_register_Asm_S16:
5584  case ARM::VLD2LNqWB_register_Asm_U16: case ARM::VLD2LNqWB_register_Asm_32:
5585  case ARM::VLD2LNqWB_register_Asm_F:   case ARM::VLD2LNqWB_register_Asm_F32:
5586  case ARM::VLD2LNqWB_register_Asm_I32: case ARM::VLD2LNqWB_register_Asm_S32:
5587  case ARM::VLD2LNqWB_register_Asm_U32: {
5588    MCInst TmpInst;
5589    // Shuffle the operands around so the lane index operand is in the
5590    // right place.
5591    unsigned Spacing;
5592    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5593    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5594    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5595                                            Spacing));
5596    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5597    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5598    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5599    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5600    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5601    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5602                                            Spacing));
5603    TmpInst.addOperand(Inst.getOperand(1)); // lane
5604    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5605    TmpInst.addOperand(Inst.getOperand(6));
5606    Inst = TmpInst;
5607    return true;
5608  }
5609
5610  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5611  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5612  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5613  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5614  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5615  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5616  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5617  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5618    MCInst TmpInst;
5619    // Shuffle the operands around so the lane index operand is in the
5620    // right place.
5621    unsigned Spacing;
5622    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5623    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5624    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5625    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5626    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5627    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5628    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5629    TmpInst.addOperand(Inst.getOperand(1)); // lane
5630    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5631    TmpInst.addOperand(Inst.getOperand(5));
5632    Inst = TmpInst;
5633    return true;
5634  }
5635
5636  case ARM::VLD2LNdWB_fixed_Asm_8:   case ARM::VLD2LNdWB_fixed_Asm_P8:
5637  case ARM::VLD2LNdWB_fixed_Asm_I8:  case ARM::VLD2LNdWB_fixed_Asm_S8:
5638  case ARM::VLD2LNdWB_fixed_Asm_U8:  case ARM::VLD2LNdWB_fixed_Asm_16:
5639  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5640  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5641  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5642  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5643  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5644  case ARM::VLD2LNqWB_fixed_Asm_16:  case ARM::VLD2LNqWB_fixed_Asm_P16:
5645  case ARM::VLD2LNqWB_fixed_Asm_I16: case ARM::VLD2LNqWB_fixed_Asm_S16:
5646  case ARM::VLD2LNqWB_fixed_Asm_U16: case ARM::VLD2LNqWB_fixed_Asm_32:
5647  case ARM::VLD2LNqWB_fixed_Asm_F:   case ARM::VLD2LNqWB_fixed_Asm_F32:
5648  case ARM::VLD2LNqWB_fixed_Asm_I32: case ARM::VLD2LNqWB_fixed_Asm_S32:
5649  case ARM::VLD2LNqWB_fixed_Asm_U32: {
5650    MCInst TmpInst;
5651    // Shuffle the operands around so the lane index operand is in the
5652    // right place.
5653    unsigned Spacing;
5654    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5655    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5656    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5657                                            Spacing));
5658    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5659    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5660    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5661    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5662    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5663    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5664                                            Spacing));
5665    TmpInst.addOperand(Inst.getOperand(1)); // lane
5666    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5667    TmpInst.addOperand(Inst.getOperand(5));
5668    Inst = TmpInst;
5669    return true;
5670  }
5671
5672  case ARM::VLD1LNdAsm_8:   case ARM::VLD1LNdAsm_P8:  case ARM::VLD1LNdAsm_I8:
5673  case ARM::VLD1LNdAsm_S8:  case ARM::VLD1LNdAsm_U8:  case ARM::VLD1LNdAsm_16:
5674  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5675  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5676  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5677  case ARM::VLD1LNdAsm_U32: {
5678    MCInst TmpInst;
5679    // Shuffle the operands around so the lane index operand is in the
5680    // right place.
5681    unsigned Spacing;
5682    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5683    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5684    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5685    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5686    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5687    TmpInst.addOperand(Inst.getOperand(1)); // lane
5688    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5689    TmpInst.addOperand(Inst.getOperand(5));
5690    Inst = TmpInst;
5691    return true;
5692  }
5693
5694  case ARM::VLD2LNdAsm_8:   case ARM::VLD2LNdAsm_P8:  case ARM::VLD2LNdAsm_I8:
5695  case ARM::VLD2LNdAsm_S8:  case ARM::VLD2LNdAsm_U8:  case ARM::VLD2LNdAsm_16:
5696  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5697  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5698  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5699  case ARM::VLD2LNdAsm_U32: case ARM::VLD2LNqAsm_16:  case ARM::VLD2LNqAsm_P16:
5700  case ARM::VLD2LNqAsm_I16: case ARM::VLD2LNqAsm_S16: case ARM::VLD2LNqAsm_U16:
5701  case ARM::VLD2LNqAsm_32:  case ARM::VLD2LNqAsm_F:   case ARM::VLD2LNqAsm_F32:
5702  case ARM::VLD2LNqAsm_I32: case ARM::VLD2LNqAsm_S32:
5703  case ARM::VLD2LNqAsm_U32: {
5704    MCInst TmpInst;
5705    // Shuffle the operands around so the lane index operand is in the
5706    // right place.
5707    unsigned Spacing;
5708    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode(), Spacing));
5709    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5710    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5711                                            Spacing));
5712    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5713    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5714    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5715    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5716                                            Spacing));
5717    TmpInst.addOperand(Inst.getOperand(1)); // lane
5718    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5719    TmpInst.addOperand(Inst.getOperand(5));
5720    Inst = TmpInst;
5721    return true;
5722  }
5723  // Handle the Thumb2 mode MOV complex aliases.
5724  case ARM::t2MOVsi:
5725  case ARM::t2MOVSsi: {
5726    // Which instruction to expand to depends on the CCOut operand and
5727    // whether we're in an IT block if the register operands are low
5728    // registers.
5729    bool isNarrow = false;
5730    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5731        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5732        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5733      isNarrow = true;
5734    MCInst TmpInst;
5735    unsigned newOpc;
5736    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5737    default: llvm_unreachable("unexpected opcode!");
5738    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5739    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5740    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5741    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5742    }
5743    unsigned Ammount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5744    if (Ammount == 32) Ammount = 0;
5745    TmpInst.setOpcode(newOpc);
5746    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5747    if (isNarrow)
5748      TmpInst.addOperand(MCOperand::CreateReg(
5749          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5750    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5751    TmpInst.addOperand(MCOperand::CreateImm(Ammount));
5752    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5753    TmpInst.addOperand(Inst.getOperand(4));
5754    if (!isNarrow)
5755      TmpInst.addOperand(MCOperand::CreateReg(
5756          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5757    Inst = TmpInst;
5758    return true;
5759  }
5760  // Handle the ARM mode MOV complex aliases.
5761  case ARM::ASRr:
5762  case ARM::LSRr:
5763  case ARM::LSLr:
5764  case ARM::RORr: {
5765    ARM_AM::ShiftOpc ShiftTy;
5766    switch(Inst.getOpcode()) {
5767    default: llvm_unreachable("unexpected opcode!");
5768    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5769    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5770    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5771    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5772    }
5773    // Register-shifted MOV: the immediate amount in the shifter opcode is
5773    // always zero here; the actual shift amount comes from the Rm operand.
5774    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5775    MCInst TmpInst;
5776    TmpInst.setOpcode(ARM::MOVsr);
5777    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5778    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5779    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5780    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5781    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5782    TmpInst.addOperand(Inst.getOperand(4));
5783    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5784    Inst = TmpInst;
5785    return true;
5786  }
5787  case ARM::ASRi:
5788  case ARM::LSRi:
5789  case ARM::LSLi:
5790  case ARM::RORi: {
5791    ARM_AM::ShiftOpc ShiftTy;
5792    switch(Inst.getOpcode()) {
5793    default: llvm_unreachable("unexpected opcode!");
5794    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5795    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5796    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5797    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5798    }
5799    // A shift by zero is a plain MOVr, not a MOVsi.
5800    unsigned Amt = Inst.getOperand(2).getImm();
5801    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5802    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5803    MCInst TmpInst;
5804    TmpInst.setOpcode(Opc);
5805    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5806    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5807    if (Opc == ARM::MOVsi)
5808      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5809    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5810    TmpInst.addOperand(Inst.getOperand(4));
5811    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5812    Inst = TmpInst;
5813    return true;
5814  }
5815  case ARM::RRXi: {
5816    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5817    MCInst TmpInst;
5818    TmpInst.setOpcode(ARM::MOVsi);
5819    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5820    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5821    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5822    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5823    TmpInst.addOperand(Inst.getOperand(3));
5824    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5825    Inst = TmpInst;
5826    return true;
5827  }
5828  case ARM::t2LDMIA_UPD: {
5829    // If this is a load of a single register, then we should use
5830    // a post-indexed LDR instruction instead, per the ARM ARM.
5831    if (Inst.getNumOperands() != 5)
5832      return false;
5833    MCInst TmpInst;
5834    TmpInst.setOpcode(ARM::t2LDR_POST);
5835    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5836    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5837    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5838    TmpInst.addOperand(MCOperand::CreateImm(4));
5839    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5840    TmpInst.addOperand(Inst.getOperand(3));
5841    Inst = TmpInst;
5842    return true;
5843  }
5844  case ARM::t2STMDB_UPD: {
5845    // If this is a store of a single register, then we should use
5846    // a pre-indexed STR instruction instead, per the ARM ARM.
5847    if (Inst.getNumOperands() != 5)
5848      return false;
5849    MCInst TmpInst;
5850    TmpInst.setOpcode(ARM::t2STR_PRE);
5851    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5852    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5853    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5854    TmpInst.addOperand(MCOperand::CreateImm(-4));
5855    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5856    TmpInst.addOperand(Inst.getOperand(3));
5857    Inst = TmpInst;
5858    return true;
5859  }
5860  case ARM::LDMIA_UPD:
5861    // If this is a load of a single register via a 'pop', then we should use
5862    // a post-indexed LDR instruction instead, per the ARM ARM.
5863    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5864        Inst.getNumOperands() == 5) {
5865      MCInst TmpInst;
5866      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5867      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5868      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5869      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5870      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5871      TmpInst.addOperand(MCOperand::CreateImm(4));
5872      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5873      TmpInst.addOperand(Inst.getOperand(3));
5874      Inst = TmpInst;
5875      return true;
5876    }
5877    break;
5878  case ARM::STMDB_UPD:
5879    // If this is a store of a single register via a 'push', then we should use
5880    // a pre-indexed STR instruction instead, per the ARM ARM.
5881    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5882        Inst.getNumOperands() == 5) {
5883      MCInst TmpInst;
5884      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5885      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5886      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5887      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5888      TmpInst.addOperand(MCOperand::CreateImm(-4));
5889      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5890      TmpInst.addOperand(Inst.getOperand(3));
5891      Inst = TmpInst;
5892    }
5893    break;
5894  case ARM::t2ADDri12:
5895    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5896    // mnemonic was used (not "addw"), encoding T3 is preferred.
5897    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5898        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5899      break;
5900    Inst.setOpcode(ARM::t2ADDri);
5901    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5902    break;
5903  case ARM::t2SUBri12:
5904    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5905    // mnemonic was used (not "subw"), encoding T3 is preferred.
5906    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5907        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5908      break;
5909    Inst.setOpcode(ARM::t2SUBri);
5910    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5911    break;
5912  case ARM::tADDi8:
5913    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5914    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5915    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5916    // to encoding T1 if <Rd> is omitted."
5917    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5918      Inst.setOpcode(ARM::tADDi3);
5919      return true;
5920    }
5921    break;
5922  case ARM::tSUBi8:
5923    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5924    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5925    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5926    // to encoding T1 if <Rd> is omitted."
5927    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5928      Inst.setOpcode(ARM::tSUBi3);
5929      return true;
5930    }
5931    break;
5932  case ARM::t2ADDrr: {
5933    // If the destination and first source operand are the same, and
5934    // there's no setting of the flags, use encoding T2 instead of T3.
5935    // Note that this is only for ADD, not SUB. This mirrors the system
5936    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
5937    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5938        Inst.getOperand(5).getReg() != 0 ||
5939        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5940         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5941      break;
5942    MCInst TmpInst;
5943    TmpInst.setOpcode(ARM::tADDhirr);
5944    TmpInst.addOperand(Inst.getOperand(0));
5945    TmpInst.addOperand(Inst.getOperand(0));
5946    TmpInst.addOperand(Inst.getOperand(2));
5947    TmpInst.addOperand(Inst.getOperand(3));
5948    TmpInst.addOperand(Inst.getOperand(4));
5949    Inst = TmpInst;
5950    return true;
5951  }
5952  case ARM::tB:
5953    // A Thumb conditional branch outside of an IT block is a tBcc.
5954    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5955      Inst.setOpcode(ARM::tBcc);
5956      return true;
5957    }
5958    break;
5959  case ARM::t2B:
5960    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5961    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
5962      Inst.setOpcode(ARM::t2Bcc);
5963      return true;
5964    }
5965    break;
5966  case ARM::t2Bcc:
5967    // If the conditional is AL or we're in an IT block, we really want t2B.
5968    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5969      Inst.setOpcode(ARM::t2B);
5970      return true;
5971    }
5972    break;
5973  case ARM::tBcc:
5974    // If the conditional is AL, we really want tB.
5975    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5976      Inst.setOpcode(ARM::tB);
5977      return true;
5978    }
5979    break;
5980  case ARM::tLDMIA: {
5981    // If the register list contains any high registers, or if the writeback
5982    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
5983    // instead if we're in Thumb2. Otherwise, this should have generated
5984    // an error in validateInstruction().
5985    unsigned Rn = Inst.getOperand(0).getReg();
5986    bool hasWritebackToken =
5987      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5988       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5989    bool listContainsBase;
5990    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
5991        (!listContainsBase && !hasWritebackToken) ||
5992        (listContainsBase && hasWritebackToken)) {
5993      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5994      assert (isThumbTwo());
5995      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
5996      // If we're switching to the updating version, we need to insert
5997      // the writeback tied operand.
5998      if (hasWritebackToken)
5999        Inst.insert(Inst.begin(),
6000                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6001      return true;
6002    }
6003    break;
6004  }
6005  case ARM::tSTMIA_UPD: {
6006    // If the register list contains any high registers, we need to use
6007    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6008    // should have generated an error in validateInstruction().
6009    unsigned Rn = Inst.getOperand(0).getReg();
6010    bool listContainsBase;
6011    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6012      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6013      assert (isThumbTwo());
6014      Inst.setOpcode(ARM::t2STMIA_UPD);
6015      return true;
6016    }
6017    break;
6018  }
6019  case ARM::tPOP: {
6020    bool listContainsBase;
6021    // If the register list contains any high registers, we need to use
6022    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6023    // should have generated an error in validateInstruction().
6024    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6025      return false;
6026    assert (isThumbTwo());
6027    Inst.setOpcode(ARM::t2LDMIA_UPD);
6028    // Add the base register and writeback operands.
6029    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6030    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6031    return true;
6032  }
6033  case ARM::tPUSH: {
6034    bool listContainsBase;
6035    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6036      return false;
6037    assert (isThumbTwo());
6038    Inst.setOpcode(ARM::t2STMDB_UPD);
6039    // Add the base register and writeback operands.
6040    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6041    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6042    return true;
6043  }
6044  case ARM::t2MOVi: {
6045    // If we can use the 16-bit encoding and the user didn't explicitly
6046    // request the 32-bit variant, transform it here.
6047    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6048        Inst.getOperand(1).getImm() <= 255 &&
6049        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6050         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6051        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6052        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6053         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6054      // The operands aren't in the same order for tMOVi8...
6055      MCInst TmpInst;
6056      TmpInst.setOpcode(ARM::tMOVi8);
6057      TmpInst.addOperand(Inst.getOperand(0));
6058      TmpInst.addOperand(Inst.getOperand(4));
6059      TmpInst.addOperand(Inst.getOperand(1));
6060      TmpInst.addOperand(Inst.getOperand(2));
6061      TmpInst.addOperand(Inst.getOperand(3));
6062      Inst = TmpInst;
6063      return true;
6064    }
6065    break;
6066  }
6067  case ARM::t2MOVr: {
6068    // If we can use the 16-bit encoding and the user didn't explicitly
6069    // request the 32-bit variant, transform it here.
6070    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6071        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6072        Inst.getOperand(2).getImm() == ARMCC::AL &&
6073        Inst.getOperand(4).getReg() == ARM::CPSR &&
6074        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6075         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6076      // The operands aren't the same for tMOV[S]r... (no cc_out)
6077      MCInst TmpInst;
6078      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6079      TmpInst.addOperand(Inst.getOperand(0));
6080      TmpInst.addOperand(Inst.getOperand(1));
6081      TmpInst.addOperand(Inst.getOperand(2));
6082      TmpInst.addOperand(Inst.getOperand(3));
6083      Inst = TmpInst;
6084      return true;
6085    }
6086    break;
6087  }
6088  case ARM::t2SXTH:
6089  case ARM::t2SXTB:
6090  case ARM::t2UXTH:
6091  case ARM::t2UXTB: {
6092    // If we can use the 16-bit encoding and the user didn't explicitly
6093    // request the 32-bit variant, transform it here.
6094    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6095        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6096        Inst.getOperand(2).getImm() == 0 &&
6097        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6098         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6099      unsigned NewOpc;
6100      switch (Inst.getOpcode()) {
6101      default: llvm_unreachable("Illegal opcode!");
6102      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6103      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6104      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6105      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6106      }
6107      // The operands aren't the same for thumb1 (no rotate operand).
6108      MCInst TmpInst;
6109      TmpInst.setOpcode(NewOpc);
6110      TmpInst.addOperand(Inst.getOperand(0));
6111      TmpInst.addOperand(Inst.getOperand(1));
6112      TmpInst.addOperand(Inst.getOperand(3));
6113      TmpInst.addOperand(Inst.getOperand(4));
6114      Inst = TmpInst;
6115      return true;
6116    }
6117    break;
6118  }
6119  case ARM::MOVsi: {
6120    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6121    if (SOpc == ARM_AM::rrx) return false;
6122    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6123      // Shifting by zero is accepted as a vanilla 'MOVr'
6124      MCInst TmpInst;
6125      TmpInst.setOpcode(ARM::MOVr);
6126      TmpInst.addOperand(Inst.getOperand(0));
6127      TmpInst.addOperand(Inst.getOperand(1));
6128      TmpInst.addOperand(Inst.getOperand(3));
6129      TmpInst.addOperand(Inst.getOperand(4));
6130      TmpInst.addOperand(Inst.getOperand(5));
6131      Inst = TmpInst;
6132      return true;
6133    }
6134    return false;
6135  }
6136  case ARM::t2IT: {
6137    // The mask bits for all but the first condition are represented as
6138    // the low bit of the condition code value implies 't'. We currently
6139    // always have 1 implies 't', so XOR toggle the bits if the low bit
6140    // of the condition code is zero. The encoding also expects the low
6141    // bit of the condition to be encoded as bit 4 of the mask operand,
6142    // so mask that in if needed
6143    MCOperand &MO = Inst.getOperand(1);
6144    unsigned Mask = MO.getImm();
6145    unsigned OrigMask = Mask;
6146    unsigned TZ = CountTrailingZeros_32(Mask);
6147    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6148      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6149      for (unsigned i = 3; i != TZ; --i)
6150        Mask ^= 1 << i;
6151    } else
6152      Mask |= 0x10;
6153    MO.setImm(Mask);
6154
6155    // Set up the IT block state according to the IT instruction we just
6156    // matched.
6157    assert(!inITBlock() && "nested IT blocks?!");
6158    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6159    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6160    ITState.CurPosition = 0;
6161    ITState.FirstCond = true;
6162    break;
6163  }
6164  }
6165  return false;
6166}
6167
6168unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6169  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
6170  // suffix depending on whether they're in an IT block or not.
6171  unsigned Opc = Inst.getOpcode();
6172  const MCInstrDesc &MCID = getInstDesc(Opc);
6173  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6174    assert(MCID.hasOptionalDef() &&
6175           "optionally flag setting instruction missing optional def operand");
6176    assert(MCID.NumOperands == Inst.getNumOperands() &&
6177           "operand count mismatch!");
6178    // Find the optional-def operand (cc_out).
6179    unsigned OpNo;
6180    for (OpNo = 0;
6181         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
6182         ++OpNo)
6183      ;
6184    // If we're parsing Thumb1, reject it completely.
6185    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6186      return Match_MnemonicFail;
6187    // If we're parsing Thumb2, which form is legal depends on whether we're
6188    // in an IT block.
6189    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6190        !inITBlock())
6191      return Match_RequiresITBlock;
6192    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6193        inITBlock())
6194      return Match_RequiresNotITBlock;
6195  }
6196  // Some high-register supporting Thumb1 encodings only allow both registers
6197  // to be from r0-r7 when in Thumb2.
6198  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6199           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6200           isARMLowRegister(Inst.getOperand(2).getReg()))
6201    return Match_RequiresThumb2;
6202  // Others only require ARMv6 or later.
6203  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6204           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6205           isARMLowRegister(Inst.getOperand(1).getReg()))
6206    return Match_RequiresV6;
6207  return Match_Success;
6208}
6209
bool ARMAsmParser::
MatchAndEmitInstruction(SMLoc IDLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                        MCStreamer &Out) {
  // Match the parsed operands against the generated instruction tables,
  // then run target-specific validation and post-processing before
  // emitting. Returns true on error (a diagnostic has been emitted),
  // false on success.
  MCInst Inst;
  unsigned ErrorInfo;
  unsigned MatchResult;
  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
  switch (MatchResult) {
  default: break;
  case Match_Success:
    // Context sensitive operand constraints aren't handled by the matcher,
    // so check them here.
    if (validateInstruction(Inst, Operands)) {
      // Still progress the IT block, otherwise one wrong condition causes
      // nasty cascading errors.
      forwardITPosition();
      return true;
    }

    // Some instructions need post-processing to, for example, tweak which
    // encoding is selected. Loop on it while changes happen so the
    // individual transformations can chain off each other. E.g.,
    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
    while (processInstruction(Inst, Operands))
      ;

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // ErrorInfo, when not ~0U, is the index of the offending parsed operand;
    // point the diagnostic at it if we can.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}
6277
6278/// parseDirective parses the arm specific directives
6279bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6280  StringRef IDVal = DirectiveID.getIdentifier();
6281  if (IDVal == ".word")
6282    return parseDirectiveWord(4, DirectiveID.getLoc());
6283  else if (IDVal == ".thumb")
6284    return parseDirectiveThumb(DirectiveID.getLoc());
6285  else if (IDVal == ".arm")
6286    return parseDirectiveARM(DirectiveID.getLoc());
6287  else if (IDVal == ".thumb_func")
6288    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6289  else if (IDVal == ".code")
6290    return parseDirectiveCode(DirectiveID.getLoc());
6291  else if (IDVal == ".syntax")
6292    return parseDirectiveSyntax(DirectiveID.getLoc());
6293  else if (IDVal == ".unreq")
6294    return parseDirectiveUnreq(DirectiveID.getLoc());
6295  else if (IDVal == ".arch")
6296    return parseDirectiveArch(DirectiveID.getLoc());
6297  else if (IDVal == ".eabi_attribute")
6298    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6299  return true;
6300}
6301
6302/// parseDirectiveWord
6303///  ::= .word [ expression (, expression)* ]
6304bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6305  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6306    for (;;) {
6307      const MCExpr *Value;
6308      if (getParser().ParseExpression(Value))
6309        return true;
6310
6311      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6312
6313      if (getLexer().is(AsmToken::EndOfStatement))
6314        break;
6315
6316      // FIXME: Improve diagnostic.
6317      if (getLexer().isNot(AsmToken::Comma))
6318        return Error(L, "unexpected token in directive");
6319      Parser.Lex();
6320    }
6321  }
6322
6323  Parser.Lex();
6324  return false;
6325}
6326
6327/// parseDirectiveThumb
6328///  ::= .thumb
6329bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6330  if (getLexer().isNot(AsmToken::EndOfStatement))
6331    return Error(L, "unexpected token in directive");
6332  Parser.Lex();
6333
6334  if (!isThumb())
6335    SwitchMode();
6336  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6337  return false;
6338}
6339
6340/// parseDirectiveARM
6341///  ::= .arm
6342bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6343  if (getLexer().isNot(AsmToken::EndOfStatement))
6344    return Error(L, "unexpected token in directive");
6345  Parser.Lex();
6346
6347  if (isThumb())
6348    SwitchMode();
6349  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6350  return false;
6351}
6352
6353/// parseDirectiveThumbFunc
6354///  ::= .thumbfunc symbol_name
6355bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6356  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6357  bool isMachO = MAI.hasSubsectionsViaSymbols();
6358  StringRef Name;
6359
6360  // Darwin asm has function name after .thumb_func direction
6361  // ELF doesn't
6362  if (isMachO) {
6363    const AsmToken &Tok = Parser.getTok();
6364    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6365      return Error(L, "unexpected token in .thumb_func directive");
6366    Name = Tok.getIdentifier();
6367    Parser.Lex(); // Consume the identifier token.
6368  }
6369
6370 if (getLexer().isNot(AsmToken::EndOfStatement))
6371    return Error(L, "unexpected token in directive");
6372  Parser.Lex();
6373
6374  // FIXME: assuming function name will be the line following .thumb_func
6375  if (!isMachO) {
6376    Name = Parser.getTok().getIdentifier();
6377  }
6378
6379  // Mark symbol as a thumb symbol.
6380  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6381  getParser().getStreamer().EmitThumbFunc(Func);
6382  return false;
6383}
6384
6385/// parseDirectiveSyntax
6386///  ::= .syntax unified | divided
6387bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6388  const AsmToken &Tok = Parser.getTok();
6389  if (Tok.isNot(AsmToken::Identifier))
6390    return Error(L, "unexpected token in .syntax directive");
6391  StringRef Mode = Tok.getString();
6392  if (Mode == "unified" || Mode == "UNIFIED")
6393    Parser.Lex();
6394  else if (Mode == "divided" || Mode == "DIVIDED")
6395    return Error(L, "'.syntax divided' arm asssembly not supported");
6396  else
6397    return Error(L, "unrecognized syntax mode in .syntax directive");
6398
6399  if (getLexer().isNot(AsmToken::EndOfStatement))
6400    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6401  Parser.Lex();
6402
6403  // TODO tell the MC streamer the mode
6404  // getParser().getStreamer().Emit???();
6405  return false;
6406}
6407
6408/// parseDirectiveCode
6409///  ::= .code 16 | 32
6410bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6411  const AsmToken &Tok = Parser.getTok();
6412  if (Tok.isNot(AsmToken::Integer))
6413    return Error(L, "unexpected token in .code directive");
6414  int64_t Val = Parser.getTok().getIntVal();
6415  if (Val == 16)
6416    Parser.Lex();
6417  else if (Val == 32)
6418    Parser.Lex();
6419  else
6420    return Error(L, "invalid operand to .code directive");
6421
6422  if (getLexer().isNot(AsmToken::EndOfStatement))
6423    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6424  Parser.Lex();
6425
6426  if (Val == 16) {
6427    if (!isThumb())
6428      SwitchMode();
6429    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6430  } else {
6431    if (isThumb())
6432      SwitchMode();
6433    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6434  }
6435
6436  return false;
6437}
6438
6439/// parseDirectiveReq
6440///  ::= name .req registername
6441bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6442  Parser.Lex(); // Eat the '.req' token.
6443  unsigned Reg;
6444  SMLoc SRegLoc, ERegLoc;
6445  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6446    Parser.EatToEndOfStatement();
6447    return Error(SRegLoc, "register name expected");
6448  }
6449
6450  // Shouldn't be anything else.
6451  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6452    Parser.EatToEndOfStatement();
6453    return Error(Parser.getTok().getLoc(),
6454                 "unexpected input in .req directive.");
6455  }
6456
6457  Parser.Lex(); // Consume the EndOfStatement
6458
6459  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6460    return Error(SRegLoc, "redefinition of '" + Name +
6461                          "' does not match original.");
6462
6463  return false;
6464}
6465
6466/// parseDirectiveUneq
6467///  ::= .unreq registername
6468bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6469  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6470    Parser.EatToEndOfStatement();
6471    return Error(L, "unexpected input in .unreq directive.");
6472  }
6473  RegisterReqs.erase(Parser.getTok().getIdentifier());
6474  Parser.Lex(); // Eat the identifier.
6475  return false;
6476}
6477
/// parseDirectiveArch
///  ::= .arch token
bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
  // FIXME: Not yet implemented; returning true signals the directive was
  // not handled.
  return true;
}
6483
/// parseDirectiveEabiAttr
///  ::= .eabi_attribute int, int
bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
  // FIXME: Not yet implemented; returning true signals the directive was
  // not handled.
  return true;
}
6489
6490extern "C" void LLVMInitializeARMAsmLexer();
6491
6492/// Force static initialization.
6493extern "C" void LLVMInitializeARMAsmParser() {
6494  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6495  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6496  LLVMInitializeARMAsmLexer();
6497}
6498
6499#define GET_REGISTER_MATCHER
6500#define GET_MATCHER_IMPLEMENTATION
6501#include "ARMGenAsmMatcher.inc"
6502