ARMAsmParser.cpp revision e983a134e7e40e214f590c3d8ba565bb85f39628
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registered via the .req directive.
49  StringMap<unsigned> RegisterReqs;
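  // Illustrative example (not from the original source): after
  //   fp .req r11
  // the alias 'fp' is accepted anywhere 'r11' would be, until a matching
  // '.unreq fp' removes it.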
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
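                              // e.g. (illustrative): a mask of 0b1000 has
                              // three trailing zeroes, so the block holds
                              // 4 - 3 = 1 instruction.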
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // first instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104  bool parseDirectiveArch(SMLoc L);
105  bool parseDirectiveEabiAttr(SMLoc L);
106
107  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
108                          bool &CarrySetting, unsigned &ProcessorIMod,
109                          StringRef &ITMask);
110  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
111                             bool &CanAcceptPredicationCode);
112
113  bool isThumb() const {
114    // FIXME: Can tablegen auto-generate this?
115    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
116  }
117  bool isThumbOne() const {
118    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
119  }
120  bool isThumbTwo() const {
121    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
122  }
123  bool hasV6Ops() const {
124    return STI.getFeatureBits() & ARM::HasV6Ops;
125  }
126  bool hasV7Ops() const {
127    return STI.getFeatureBits() & ARM::HasV7Ops;
128  }
129  void SwitchMode() {
130    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
131    setAvailableFeatures(FB);
132  }
133  bool isMClass() const {
134    return STI.getFeatureBits() & ARM::FeatureMClass;
135  }
136
137  /// @name Auto-generated Match Functions
138  /// {
139
140#define GET_ASSEMBLER_HEADER
141#include "ARMGenAsmMatcher.inc"
142
143  /// }
144
145  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocNumOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocRegOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseCoprocOptionOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseMemBarrierOptOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseProcIFlagsOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parseMSRMaskOperand(
157    SmallVectorImpl<MCParsedAsmOperand*>&);
158  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
159                                   StringRef Op, int Low, int High);
160  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
161    return parsePKHImm(O, "lsl", 0, 31);
162  }
163  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
164    return parsePKHImm(O, "asr", 1, 32);
165  }
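  // For reference (illustrative examples, not from the original source):
  // "pkhbt r0, r1, r2, lsl #8" uses the LSL form with range [0,31], and
  // "pkhtb r0, r1, r2, asr #16" uses the ASR form with range [1,32].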
166  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
175
176  // Asm Match Converter Methods
177  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
180                    const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
194                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
202                             const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
206                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
208                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
212                     const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
214                        const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
216                     const SmallVectorImpl<MCParsedAsmOperand*> &);
217  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
218                        const SmallVectorImpl<MCParsedAsmOperand*> &);
219
220  bool validateInstruction(MCInst &Inst,
221                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool processInstruction(MCInst &Inst,
223                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
224  bool shouldOmitCCOutOperand(StringRef Mnemonic,
225                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
226
227public:
228  enum ARMMatchResultTy {
229    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
230    Match_RequiresNotITBlock,
231    Match_RequiresV6,
232    Match_RequiresThumb2
233  };
234
235  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
236    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
237    MCAsmParserExtension::Initialize(_Parser);
238
239    // Initialize the set of available features.
240    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
241
242    // Not in an ITBlock to start with.
243    ITState.CurPosition = ~0U;
244  }
245
246  // Implementation of the MCTargetAsmParser interface:
247  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
248  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
249                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
250  bool ParseDirective(AsmToken DirectiveID);
251
252  unsigned checkTargetMatchPredicate(MCInst &Inst);
253
254  bool MatchAndEmitInstruction(SMLoc IDLoc,
255                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
256                               MCStreamer &Out);
257};
258} // end anonymous namespace
259
260namespace {
261
262/// ARMOperand - Instances of this class represent a parsed ARM machine
263/// instruction.
264class ARMOperand : public MCParsedAsmOperand {
265  enum KindTy {
266    k_CondCode,
267    k_CCOut,
268    k_ITCondMask,
269    k_CoprocNum,
270    k_CoprocReg,
271    k_CoprocOption,
272    k_Immediate,
273    k_MemBarrierOpt,
274    k_Memory,
275    k_PostIndexRegister,
276    k_MSRMask,
277    k_ProcIFlags,
278    k_VectorIndex,
279    k_Register,
280    k_RegisterList,
281    k_DPRRegisterList,
282    k_SPRRegisterList,
283    k_VectorList,
284    k_VectorListAllLanes,
285    k_VectorListIndexed,
286    k_ShiftedRegister,
287    k_ShiftedImmediate,
288    k_ShifterImmediate,
289    k_RotateImmediate,
290    k_BitfieldDescriptor,
291    k_Token
292  } Kind;
293
294  SMLoc StartLoc, EndLoc;
295  SmallVector<unsigned, 8> Registers;
296
297  union {
298    struct {
299      ARMCC::CondCodes Val;
300    } CC;
301
302    struct {
303      unsigned Val;
304    } Cop;
305
306    struct {
307      unsigned Val;
308    } CoprocOption;
309
310    struct {
311      unsigned Mask:4;
312    } ITMask;
313
314    struct {
315      ARM_MB::MemBOpt Val;
316    } MBOpt;
317
318    struct {
319      ARM_PROC::IFlags Val;
320    } IFlags;
321
322    struct {
323      unsigned Val;
324    } MMask;
325
326    struct {
327      const char *Data;
328      unsigned Length;
329    } Tok;
330
331    struct {
332      unsigned RegNum;
333    } Reg;
334
335    // A vector register list is a sequential list of 1 to 4 registers.
336    struct {
337      unsigned RegNum;
338      unsigned Count;
339      unsigned LaneIndex;
340      bool isDoubleSpaced;
341    } VectorList;
342
343    struct {
344      unsigned Val;
345    } VectorIndex;
346
347    struct {
348      const MCExpr *Val;
349    } Imm;
350
351    /// Combined record for all forms of ARM address expressions.
352    struct {
353      unsigned BaseRegNum;
354      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
355      // was specified.
356      const MCConstantExpr *OffsetImm;  // Offset immediate value
357      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
358      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
359      unsigned ShiftImm;        // shift for OffsetReg.
360      unsigned Alignment;       // 0 = no alignment specified
361                                // n = alignment in bytes (2, 4, 8, 16, or 32)
362      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
363    } Memory;
364
365    struct {
366      unsigned RegNum;
367      bool isAdd;
368      ARM_AM::ShiftOpc ShiftTy;
369      unsigned ShiftImm;
370    } PostIdxReg;
371
372    struct {
373      bool isASR;
374      unsigned Imm;
375    } ShifterImm;
376    struct {
377      ARM_AM::ShiftOpc ShiftTy;
378      unsigned SrcReg;
379      unsigned ShiftReg;
380      unsigned ShiftImm;
381    } RegShiftedReg;
382    struct {
383      ARM_AM::ShiftOpc ShiftTy;
384      unsigned SrcReg;
385      unsigned ShiftImm;
386    } RegShiftedImm;
387    struct {
388      unsigned Imm;
389    } RotImm;
390    struct {
391      unsigned LSB;
392      unsigned Width;
393    } Bitfield;
394  };
395
396  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
397public:
398  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
399    Kind = o.Kind;
400    StartLoc = o.StartLoc;
401    EndLoc = o.EndLoc;
402    switch (Kind) {
403    case k_CondCode:
404      CC = o.CC;
405      break;
406    case k_ITCondMask:
407      ITMask = o.ITMask;
408      break;
409    case k_Token:
410      Tok = o.Tok;
411      break;
412    case k_CCOut:
413    case k_Register:
414      Reg = o.Reg;
415      break;
416    case k_RegisterList:
417    case k_DPRRegisterList:
418    case k_SPRRegisterList:
419      Registers = o.Registers;
420      break;
421    case k_VectorList:
422    case k_VectorListAllLanes:
423    case k_VectorListIndexed:
424      VectorList = o.VectorList;
425      break;
426    case k_CoprocNum:
427    case k_CoprocReg:
428      Cop = o.Cop;
429      break;
430    case k_CoprocOption:
431      CoprocOption = o.CoprocOption;
432      break;
433    case k_Immediate:
434      Imm = o.Imm;
435      break;
436    case k_MemBarrierOpt:
437      MBOpt = o.MBOpt;
438      break;
439    case k_Memory:
440      Memory = o.Memory;
441      break;
442    case k_PostIndexRegister:
443      PostIdxReg = o.PostIdxReg;
444      break;
445    case k_MSRMask:
446      MMask = o.MMask;
447      break;
448    case k_ProcIFlags:
449      IFlags = o.IFlags;
450      break;
451    case k_ShifterImmediate:
452      ShifterImm = o.ShifterImm;
453      break;
454    case k_ShiftedRegister:
455      RegShiftedReg = o.RegShiftedReg;
456      break;
457    case k_ShiftedImmediate:
458      RegShiftedImm = o.RegShiftedImm;
459      break;
460    case k_RotateImmediate:
461      RotImm = o.RotImm;
462      break;
463    case k_BitfieldDescriptor:
464      Bitfield = o.Bitfield;
465      break;
466    case k_VectorIndex:
467      VectorIndex = o.VectorIndex;
468      break;
469    }
470  }
471
472  /// getStartLoc - Get the location of the first token of this operand.
473  SMLoc getStartLoc() const { return StartLoc; }
474  /// getEndLoc - Get the location of the last token of this operand.
475  SMLoc getEndLoc() const { return EndLoc; }
476
477  ARMCC::CondCodes getCondCode() const {
478    assert(Kind == k_CondCode && "Invalid access!");
479    return CC.Val;
480  }
481
482  unsigned getCoproc() const {
483    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
484    return Cop.Val;
485  }
486
487  StringRef getToken() const {
488    assert(Kind == k_Token && "Invalid access!");
489    return StringRef(Tok.Data, Tok.Length);
490  }
491
492  unsigned getReg() const {
493    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
494    return Reg.RegNum;
495  }
496
497  const SmallVectorImpl<unsigned> &getRegList() const {
498    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
499            Kind == k_SPRRegisterList) && "Invalid access!");
500    return Registers;
501  }
502
503  const MCExpr *getImm() const {
504    assert(isImm() && "Invalid access!");
505    return Imm.Val;
506  }
507
508  unsigned getVectorIndex() const {
509    assert(Kind == k_VectorIndex && "Invalid access!");
510    return VectorIndex.Val;
511  }
512
513  ARM_MB::MemBOpt getMemBarrierOpt() const {
514    assert(Kind == k_MemBarrierOpt && "Invalid access!");
515    return MBOpt.Val;
516  }
517
518  ARM_PROC::IFlags getProcIFlags() const {
519    assert(Kind == k_ProcIFlags && "Invalid access!");
520    return IFlags.Val;
521  }
522
523  unsigned getMSRMask() const {
524    assert(Kind == k_MSRMask && "Invalid access!");
525    return MMask.Val;
526  }
527
528  bool isCoprocNum() const { return Kind == k_CoprocNum; }
529  bool isCoprocReg() const { return Kind == k_CoprocReg; }
530  bool isCoprocOption() const { return Kind == k_CoprocOption; }
531  bool isCondCode() const { return Kind == k_CondCode; }
532  bool isCCOut() const { return Kind == k_CCOut; }
533  bool isITMask() const { return Kind == k_ITCondMask; }
534  bool isITCondCode() const { return Kind == k_CondCode; }
535  bool isImm() const { return Kind == k_Immediate; }
536  bool isFPImm() const {
537    if (!isImm()) return false;
538    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
539    if (!CE) return false;
540    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
541    return Val != -1;
542  }
543  bool isFBits16() const {
544    if (!isImm()) return false;
545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
546    if (!CE) return false;
547    int64_t Value = CE->getValue();
548    return Value >= 0 && Value <= 16;
549  }
550  bool isFBits32() const {
551    if (!isImm()) return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return Value >= 1 && Value <= 32;
556  }
557  bool isImm8s4() const {
558    if (!isImm()) return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
563  }
564  bool isImm0_1020s4() const {
565    if (!isImm()) return false;
566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
567    if (!CE) return false;
568    int64_t Value = CE->getValue();
569    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
570  }
571  bool isImm0_508s4() const {
572    if (!isImm()) return false;
573    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
574    if (!CE) return false;
575    int64_t Value = CE->getValue();
576    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
577  }
578  bool isImm0_255() const {
579    if (!isImm()) return false;
580    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
581    if (!CE) return false;
582    int64_t Value = CE->getValue();
583    return Value >= 0 && Value < 256;
584  }
585  bool isImm0_1() const {
586    if (!isImm()) return false;
587    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
588    if (!CE) return false;
589    int64_t Value = CE->getValue();
590    return Value >= 0 && Value < 2;
591  }
592  bool isImm0_3() const {
593    if (!isImm()) return false;
594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
595    if (!CE) return false;
596    int64_t Value = CE->getValue();
597    return Value >= 0 && Value < 4;
598  }
599  bool isImm0_7() const {
600    if (!isImm()) return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 8;
605  }
606  bool isImm0_15() const {
607    if (!isImm()) return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 16;
612  }
613  bool isImm0_31() const {
614    if (!isImm()) return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 32;
619  }
620  bool isImm0_63() const {
621    if (!isImm()) return false;
622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
623    if (!CE) return false;
624    int64_t Value = CE->getValue();
625    return Value >= 0 && Value < 64;
626  }
627  bool isImm8() const {
628    if (!isImm()) return false;
629    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
630    if (!CE) return false;
631    int64_t Value = CE->getValue();
632    return Value == 8;
633  }
634  bool isImm16() const {
635    if (!isImm()) return false;
636    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
637    if (!CE) return false;
638    int64_t Value = CE->getValue();
639    return Value == 16;
640  }
641  bool isImm32() const {
642    if (!isImm()) return false;
643    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
644    if (!CE) return false;
645    int64_t Value = CE->getValue();
646    return Value == 32;
647  }
648  bool isShrImm8() const {
649    if (!isImm()) return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value > 0 && Value <= 8;
654  }
655  bool isShrImm16() const {
656    if (!isImm()) return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value <= 16;
661  }
662  bool isShrImm32() const {
663    if (!isImm()) return false;
664    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
665    if (!CE) return false;
666    int64_t Value = CE->getValue();
667    return Value > 0 && Value <= 32;
668  }
669  bool isShrImm64() const {
670    if (!isImm()) return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 64;
675  }
676  bool isImm1_7() const {
677    if (!isImm()) return false;
678    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
679    if (!CE) return false;
680    int64_t Value = CE->getValue();
681    return Value > 0 && Value < 8;
682  }
683  bool isImm1_15() const {
684    if (!isImm()) return false;
685    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
686    if (!CE) return false;
687    int64_t Value = CE->getValue();
688    return Value > 0 && Value < 16;
689  }
690  bool isImm1_31() const {
691    if (!isImm()) return false;
692    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
693    if (!CE) return false;
694    int64_t Value = CE->getValue();
695    return Value > 0 && Value < 32;
696  }
697  bool isImm1_16() const {
698    if (!isImm()) return false;
699    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
700    if (!CE) return false;
701    int64_t Value = CE->getValue();
702    return Value > 0 && Value < 17;
703  }
704  bool isImm1_32() const {
705    if (!isImm()) return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value > 0 && Value < 33;
710  }
711  bool isImm0_32() const {
712    if (!isImm()) return false;
713    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
714    if (!CE) return false;
715    int64_t Value = CE->getValue();
716    return Value >= 0 && Value < 33;
717  }
718  bool isImm0_65535() const {
719    if (!isImm()) return false;
720    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
721    if (!CE) return false;
722    int64_t Value = CE->getValue();
723    return Value >= 0 && Value < 65536;
724  }
725  bool isImm0_65535Expr() const {
726    if (!isImm()) return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    // If it's not a constant expression, it'll generate a fixup and be
729    // handled later.
730    if (!CE) return true;
731    int64_t Value = CE->getValue();
732    return Value >= 0 && Value < 65536;
733  }
734  bool isImm24bit() const {
735    if (!isImm()) return false;
736    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
737    if (!CE) return false;
738    int64_t Value = CE->getValue();
739    return Value >= 0 && Value <= 0xffffff;
740  }
741  bool isImmThumbSR() const {
742    if (!isImm()) return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    if (!CE) return false;
745    int64_t Value = CE->getValue();
746    return Value > 0 && Value < 33;
747  }
748  bool isPKHLSLImm() const {
749    if (!isImm()) return false;
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Value = CE->getValue();
753    return Value >= 0 && Value < 32;
754  }
755  bool isPKHASRImm() const {
756    if (!isImm()) return false;
757    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
758    if (!CE) return false;
759    int64_t Value = CE->getValue();
760    return Value > 0 && Value <= 32;
761  }
762  bool isARMSOImm() const {
763    if (!isImm()) return false;
764    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
765    if (!CE) return false;
766    int64_t Value = CE->getValue();
767    return ARM_AM::getSOImmVal(Value) != -1;
768  }
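  // Illustrative so_imm examples (the Not/Neg variants below apply the same
  // test to the complemented/negated value): 0xff000000 is valid (0xff
  // rotated right by 8), while 0x101 is not, since its set bits cannot fit
  // in an 8-bit field under any even rotation.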
769  bool isARMSOImmNot() const {
770    if (!isImm()) return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getSOImmVal(~Value) != -1;
775  }
776  bool isARMSOImmNeg() const {
777    if (!isImm()) return false;
778    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
779    if (!CE) return false;
780    int64_t Value = CE->getValue();
781    return ARM_AM::getSOImmVal(-Value) != -1;
782  }
783  bool isT2SOImm() const {
784    if (!isImm()) return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getT2SOImmVal(Value) != -1;
789  }
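  // Illustrative t2_so_imm examples (likewise reused by the Not/Neg variants
  // below): the replicated-byte forms 0x00ab00ab and 0xabababab are valid,
  // whereas 0x00ab00ac is not.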
790  bool isT2SOImmNot() const {
791    if (!isImm()) return false;
792    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
793    if (!CE) return false;
794    int64_t Value = CE->getValue();
795    return ARM_AM::getT2SOImmVal(~Value) != -1;
796  }
797  bool isT2SOImmNeg() const {
798    if (!isImm()) return false;
799    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
800    if (!CE) return false;
801    int64_t Value = CE->getValue();
802    return ARM_AM::getT2SOImmVal(-Value) != -1;
803  }
804  bool isSetEndImm() const {
805    if (!isImm()) return false;
806    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
807    if (!CE) return false;
808    int64_t Value = CE->getValue();
809    return Value == 1 || Value == 0;
810  }
811  bool isReg() const { return Kind == k_Register; }
812  bool isRegList() const { return Kind == k_RegisterList; }
813  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
814  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
815  bool isToken() const { return Kind == k_Token; }
816  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
817  bool isMemory() const { return Kind == k_Memory; }
818  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
819  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
820  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
821  bool isRotImm() const { return Kind == k_RotateImmediate; }
822  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
823  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
824  bool isPostIdxReg() const {
825    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
826  }
827  bool isMemNoOffset(bool alignOK = false) const {
828    if (!isMemory())
829      return false;
830    // No offset of any kind.
831    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
832     (alignOK || Memory.Alignment == 0);
833  }
834  bool isMemPCRelImm12() const {
835    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
836      return false;
837    // Base register must be PC.
838    if (Memory.BaseRegNum != ARM::PC)
839      return false;
840    // Immediate offset in range [-4095, 4095].
841    if (!Memory.OffsetImm) return true;
842    int64_t Val = Memory.OffsetImm->getValue();
843    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
844  }
845  bool isAlignedMemory() const {
846    return isMemNoOffset(true);
847  }
848  bool isAddrMode2() const {
849    if (!isMemory() || Memory.Alignment != 0) return false;
850    // Check for register offset.
851    if (Memory.OffsetRegNum) return true;
852    // Immediate offset in range [-4095, 4095].
853    if (!Memory.OffsetImm) return true;
854    int64_t Val = Memory.OffsetImm->getValue();
855    return Val > -4096 && Val < 4096;
856  }
857  bool isAM2OffsetImm() const {
858    if (!isImm()) return false;
859    // Immediate offset in range [-4095, 4095].
860    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
861    if (!CE) return false;
862    int64_t Val = CE->getValue();
863    return Val > -4096 && Val < 4096;
864  }
865  bool isAddrMode3() const {
866    // If we have an immediate that's not a constant, treat it as a label
867    // reference needing a fixup. If it is a constant, it's something else
868    // and we reject it.
869    if (isImm() && !isa<MCConstantExpr>(getImm()))
870      return true;
871    if (!isMemory() || Memory.Alignment != 0) return false;
872    // No shifts are legal for AM3.
873    if (Memory.ShiftType != ARM_AM::no_shift) return false;
874    // Check for register offset.
875    if (Memory.OffsetRegNum) return true;
876    // Immediate offset in range [-255, 255].
877    if (!Memory.OffsetImm) return true;
878    int64_t Val = Memory.OffsetImm->getValue();
879    return Val > -256 && Val < 256;
880  }
881  bool isAM3Offset() const {
882    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
883      return false;
884    if (Kind == k_PostIndexRegister)
885      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
886    // Immediate offset in range [-255, 255].
887    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
888    if (!CE) return false;
889    int64_t Val = CE->getValue();
890    // Special case, #-0 is INT32_MIN.
891    return (Val > -256 && Val < 256) || Val == INT32_MIN;
892  }
893  bool isAddrMode5() const {
894    // If we have an immediate that's not a constant, treat it as a label
895    // reference needing a fixup. If it is a constant, it's something else
896    // and we reject it.
897    if (isImm() && !isa<MCConstantExpr>(getImm()))
898      return true;
899    if (!isMemory() || Memory.Alignment != 0) return false;
900    // Check for register offset.
901    if (Memory.OffsetRegNum) return false;
902    // Immediate offset in range [-1020, 1020] and a multiple of 4.
903    if (!Memory.OffsetImm) return true;
904    int64_t Val = Memory.OffsetImm->getValue();
905    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
906      Val == INT32_MIN;
907  }
908  bool isMemTBB() const {
909    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
910        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
911      return false;
912    return true;
913  }
914  bool isMemTBH() const {
915    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
916        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
917        Memory.Alignment != 0 )
918      return false;
919    return true;
920  }
921  bool isMemRegOffset() const {
922    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
923      return false;
924    return true;
925  }
926  bool isT2MemRegOffset() const {
927    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
928        Memory.Alignment != 0)
929      return false;
930    // Only lsl #{0, 1, 2, 3} allowed.
931    if (Memory.ShiftType == ARM_AM::no_shift)
932      return true;
933    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
934      return false;
935    return true;
936  }
937  bool isMemThumbRR() const {
938    // Thumb reg+reg addressing is simple. Just two registers, a base and
939    // an offset. No shifts, negations or any other complicating factors.
940    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
941        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
942      return false;
943    return isARMLowRegister(Memory.BaseRegNum) &&
944      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
945  }
946  bool isMemThumbRIs4() const {
947    if (!isMemory() || Memory.OffsetRegNum != 0 ||
948        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
949      return false;
950    // Immediate offset, multiple of 4 in range [0, 124].
951    if (!Memory.OffsetImm) return true;
952    int64_t Val = Memory.OffsetImm->getValue();
953    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
954  }
955  bool isMemThumbRIs2() const {
956    if (!isMemory() || Memory.OffsetRegNum != 0 ||
957        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
958      return false;
959    // Immediate offset, multiple of 2 in range [0, 62].
960    if (!Memory.OffsetImm) return true;
961    int64_t Val = Memory.OffsetImm->getValue();
962    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
963  }
964  bool isMemThumbRIs1() const {
965    if (!isMemory() || Memory.OffsetRegNum != 0 ||
966        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
967      return false;
968    // Immediate offset in range [0, 31].
969    if (!Memory.OffsetImm) return true;
970    int64_t Val = Memory.OffsetImm->getValue();
971    return Val >= 0 && Val <= 31;
972  }
973  bool isMemThumbSPI() const {
974    if (!isMemory() || Memory.OffsetRegNum != 0 ||
975        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
976      return false;
977    // Immediate offset, multiple of 4 in range [0, 1020].
978    if (!Memory.OffsetImm) return true;
979    int64_t Val = Memory.OffsetImm->getValue();
980    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
981  }
982  bool isMemImm8s4Offset() const {
983    // If we have an immediate that's not a constant, treat it as a label
984    // reference needing a fixup. If it is a constant, it's something else
985    // and we reject it.
986    if (isImm() && !isa<MCConstantExpr>(getImm()))
987      return true;
988    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
989      return false;
990    // Immediate offset a multiple of 4 in range [-1020, 1020].
991    if (!Memory.OffsetImm) return true;
992    int64_t Val = Memory.OffsetImm->getValue();
993    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
994  }
995  bool isMemImm0_1020s4Offset() const {
996    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
997      return false;
998    // Immediate offset a multiple of 4 in range [0, 1020].
999    if (!Memory.OffsetImm) return true;
1000    int64_t Val = Memory.OffsetImm->getValue();
1001    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1002  }
1003  bool isMemImm8Offset() const {
1004    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1005      return false;
1006    // Base reg of PC isn't allowed for these encodings.
1007    if (Memory.BaseRegNum == ARM::PC) return false;
1008    // Immediate offset in range [-255, 255].
1009    if (!Memory.OffsetImm) return true;
1010    int64_t Val = Memory.OffsetImm->getValue();
1011    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1012  }
1013  bool isMemPosImm8Offset() const {
1014    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1015      return false;
1016    // Immediate offset in range [0, 255].
1017    if (!Memory.OffsetImm) return true;
1018    int64_t Val = Memory.OffsetImm->getValue();
1019    return Val >= 0 && Val < 256;
1020  }
1021  bool isMemNegImm8Offset() const {
1022    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1023      return false;
1024    // Base reg of PC isn't allowed for these encodings.
1025    if (Memory.BaseRegNum == ARM::PC) return false;
1026    // Immediate offset in range [-255, -1].
1027    if (!Memory.OffsetImm) return false;
1028    int64_t Val = Memory.OffsetImm->getValue();
1029    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1030  }
1031  bool isMemUImm12Offset() const {
1032    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1033      return false;
1034    // Immediate offset in range [0, 4095].
1035    if (!Memory.OffsetImm) return true;
1036    int64_t Val = Memory.OffsetImm->getValue();
1037    return (Val >= 0 && Val < 4096);
1038  }
1039  bool isMemImm12Offset() const {
1040    // If we have an immediate that's not a constant, treat it as a label
1041    // reference needing a fixup. If it is a constant, it's something else
1042    // and we reject it.
1043    if (isImm() && !isa<MCConstantExpr>(getImm()))
1044      return true;
1045
1046    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1047      return false;
1048    // Immediate offset in range [-4095, 4095].
1049    if (!Memory.OffsetImm) return true;
1050    int64_t Val = Memory.OffsetImm->getValue();
1051    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1052  }
1053  bool isPostIdxImm8() const {
1054    if (!isImm()) return false;
1055    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1056    if (!CE) return false;
1057    int64_t Val = CE->getValue();
1058    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1059  }
1060  bool isPostIdxImm8s4() const {
1061    if (!isImm()) return false;
1062    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1063    if (!CE) return false;
1064    int64_t Val = CE->getValue();
1065    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1066      (Val == INT32_MIN);
1067  }
1068
1069  bool isMSRMask() const { return Kind == k_MSRMask; }
1070  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1071
1072  // NEON operands.
1073  bool isSingleSpacedVectorList() const {
1074    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1075  }
1076  bool isDoubleSpacedVectorList() const {
1077    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1078  }
1079  bool isVecListOneD() const {
1080    if (!isSingleSpacedVectorList()) return false;
1081    return VectorList.Count == 1;
1082  }
1083
1084  bool isVecListTwoD() const {
1085    if (!isSingleSpacedVectorList()) return false;
1086    return VectorList.Count == 2;
1087  }
1088
1089  bool isVecListThreeD() const {
1090    if (!isSingleSpacedVectorList()) return false;
1091    return VectorList.Count == 3;
1092  }
1093
1094  bool isVecListFourD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 4;
1097  }
1098
1099  bool isVecListTwoQ() const {
1100    if (!isDoubleSpacedVectorList()) return false;
1101    return VectorList.Count == 2;
1102  }
1103
1104  bool isVecListThreeQ() const {
1105    if (!isDoubleSpacedVectorList()) return false;
1106    return VectorList.Count == 3;
1107  }
1108
1109  bool isVecListFourQ() const {
1110    if (!isDoubleSpacedVectorList()) return false;
1111    return VectorList.Count == 4;
1112  }
1113
1114  bool isSingleSpacedVectorAllLanes() const {
1115    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1116  }
1117  bool isDoubleSpacedVectorAllLanes() const {
1118    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1119  }
1120  bool isVecListOneDAllLanes() const {
1121    if (!isSingleSpacedVectorAllLanes()) return false;
1122    return VectorList.Count == 1;
1123  }
1124
1125  bool isVecListTwoDAllLanes() const {
1126    if (!isSingleSpacedVectorAllLanes()) return false;
1127    return VectorList.Count == 2;
1128  }
1129
1130  bool isVecListTwoQAllLanes() const {
1131    if (!isDoubleSpacedVectorAllLanes()) return false;
1132    return VectorList.Count == 2;
1133  }
1134
1135  bool isSingleSpacedVectorIndexed() const {
1136    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1137  }
1138  bool isDoubleSpacedVectorIndexed() const {
1139    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1140  }
1141  bool isVecListOneDByteIndexed() const {
1142    if (!isSingleSpacedVectorIndexed()) return false;
1143    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1144  }
1145
1146  bool isVecListOneDHWordIndexed() const {
1147    if (!isSingleSpacedVectorIndexed()) return false;
1148    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1149  }
1150
1151  bool isVecListOneDWordIndexed() const {
1152    if (!isSingleSpacedVectorIndexed()) return false;
1153    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1154  }
1155
1156  bool isVecListTwoDByteIndexed() const {
1157    if (!isSingleSpacedVectorIndexed()) return false;
1158    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1159  }
1160
1161  bool isVecListTwoDHWordIndexed() const {
1162    if (!isSingleSpacedVectorIndexed()) return false;
1163    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1164  }
1165
1166  bool isVecListTwoQWordIndexed() const {
1167    if (!isDoubleSpacedVectorIndexed()) return false;
1168    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1169  }
1170
1171  bool isVecListTwoQHWordIndexed() const {
1172    if (!isDoubleSpacedVectorIndexed()) return false;
1173    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1174  }
1175
1176  bool isVecListTwoDWordIndexed() const {
1177    if (!isSingleSpacedVectorIndexed()) return false;
1178    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1179  }
1180
1181  bool isVecListThreeDByteIndexed() const {
1182    if (!isSingleSpacedVectorIndexed()) return false;
1183    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1184  }
1185
1186  bool isVecListThreeDHWordIndexed() const {
1187    if (!isSingleSpacedVectorIndexed()) return false;
1188    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1189  }
1190
1191  bool isVecListThreeQWordIndexed() const {
1192    if (!isDoubleSpacedVectorIndexed()) return false;
1193    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1194  }
1195
1196  bool isVecListThreeQHWordIndexed() const {
1197    if (!isDoubleSpacedVectorIndexed()) return false;
1198    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1199  }
1200
1201  bool isVecListThreeDWordIndexed() const {
1202    if (!isSingleSpacedVectorIndexed()) return false;
1203    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1204  }
1205
1206  bool isVecListFourDByteIndexed() const {
1207    if (!isSingleSpacedVectorIndexed()) return false;
1208    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1209  }
1210
1211  bool isVecListFourDHWordIndexed() const {
1212    if (!isSingleSpacedVectorIndexed()) return false;
1213    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1214  }
1215
1216  bool isVecListFourQWordIndexed() const {
1217    if (!isDoubleSpacedVectorIndexed()) return false;
1218    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1219  }
1220
1221  bool isVecListFourQHWordIndexed() const {
1222    if (!isDoubleSpacedVectorIndexed()) return false;
1223    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1224  }
1225
1226  bool isVecListFourDWordIndexed() const {
1227    if (!isSingleSpacedVectorIndexed()) return false;
1228    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1229  }
1230
1231  bool isVectorIndex8() const {
1232    if (Kind != k_VectorIndex) return false;
1233    return VectorIndex.Val < 8;
1234  }
1235  bool isVectorIndex16() const {
1236    if (Kind != k_VectorIndex) return false;
1237    return VectorIndex.Val < 4;
1238  }
1239  bool isVectorIndex32() const {
1240    if (Kind != k_VectorIndex) return false;
1241    return VectorIndex.Val < 2;
1242  }
1243
1244  bool isNEONi8splat() const {
1245    if (!isImm()) return false;
1246    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1247    // Must be a constant.
1248    if (!CE) return false;
1249    int64_t Value = CE->getValue();
1250    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1251    // value.
1252    return Value >= 0 && Value < 256;
1253  }
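  // e.g. (illustrative) "vmov.i8 d0, #0x42" splats 0x42 into every byte,
  // so any value in [0, 255] is accepted here.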
1254
1255  bool isNEONi16splat() const {
1256    if (!isImm()) return false;
1257    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1258    // Must be a constant.
1259    if (!CE) return false;
1260    int64_t Value = CE->getValue();
1261    // i16 value in the range [0,255] or [0x0100, 0xff00]
1262    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1263  }
1264
1265  bool isNEONi32splat() const {
1266    if (!isImm()) return false;
1267    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1268    // Must be a constant.
1269    if (!CE) return false;
1270    int64_t Value = CE->getValue();
1271    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1272    return (Value >= 0 && Value < 256) ||
1273      (Value >= 0x0100 && Value <= 0xff00) ||
1274      (Value >= 0x010000 && Value <= 0xff0000) ||
1275      (Value >= 0x01000000 && Value <= 0xff000000);
1276  }
1277
1278  bool isNEONi32vmov() const {
1279    if (!isImm()) return false;
1280    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1281    // Must be a constant.
1282    if (!CE) return false;
1283    int64_t Value = CE->getValue();
1284    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
1285    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1286    return (Value >= 0 && Value < 256) ||
1287      (Value >= 0x0100 && Value <= 0xff00) ||
1288      (Value >= 0x010000 && Value <= 0xff0000) ||
1289      (Value >= 0x01000000 && Value <= 0xff000000) ||
1290      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1291      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1292  }
1293  bool isNEONi32vmovNeg() const {
1294    if (!isImm()) return false;
1295    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1296    // Must be a constant.
1297    if (!CE) return false;
1298    int64_t Value = ~CE->getValue();
1299    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
1300    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1301    return (Value >= 0 && Value < 256) ||
1302      (Value >= 0x0100 && Value <= 0xff00) ||
1303      (Value >= 0x010000 && Value <= 0xff0000) ||
1304      (Value >= 0x01000000 && Value <= 0xff000000) ||
1305      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1306      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1307  }
1308
1309  bool isNEONi64splat() const {
1310    if (!isImm()) return false;
1311    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1312    // Must be a constant.
1313    if (!CE) return false;
1314    uint64_t Value = CE->getValue();
1315    // i64 value with each byte being either 0 or 0xff.
1316    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1317      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1318    return true;
1319  }
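  // Illustrative: 0x00ff00ff00ff00ff and 0xff00000000000000 pass this check;
  // 0x00000000000000fe does not, since 0xfe is neither 0 nor 0xff.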
1320
1321  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1322    // Add as immediates when possible.  Null MCExpr = 0.
1323    if (Expr == 0)
1324      Inst.addOperand(MCOperand::CreateImm(0));
1325    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1326      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1327    else
1328      Inst.addOperand(MCOperand::CreateExpr(Expr));
1329  }
1330
1331  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1332    assert(N == 2 && "Invalid number of operands!");
1333    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1334    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1335    Inst.addOperand(MCOperand::CreateReg(RegNum));
1336  }
1337
1338  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1339    assert(N == 1 && "Invalid number of operands!");
1340    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1341  }
1342
1343  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1344    assert(N == 1 && "Invalid number of operands!");
1345    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1346  }
1347
1348  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1349    assert(N == 1 && "Invalid number of operands!");
1350    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1351  }
1352
1353  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1354    assert(N == 1 && "Invalid number of operands!");
1355    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1356  }
1357
1358  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1359    assert(N == 1 && "Invalid number of operands!");
1360    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1361  }
1362
1363  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1364    assert(N == 1 && "Invalid number of operands!");
1365    Inst.addOperand(MCOperand::CreateReg(getReg()));
1366  }
1367
1368  void addRegOperands(MCInst &Inst, unsigned N) const {
1369    assert(N == 1 && "Invalid number of operands!");
1370    Inst.addOperand(MCOperand::CreateReg(getReg()));
1371  }
1372
1373  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1374    assert(N == 3 && "Invalid number of operands!");
1375    assert(isRegShiftedReg() &&
1376           "addRegShiftedRegOperands() on non RegShiftedReg!");
1377    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1378    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1379    Inst.addOperand(MCOperand::CreateImm(
1380      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1381  }
1382
1383  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1384    assert(N == 2 && "Invalid number of operands!");
1385    assert(isRegShiftedImm() &&
1386           "addRegShiftedImmOperands() on non RegShiftedImm!");
1387    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1388    Inst.addOperand(MCOperand::CreateImm(
1389      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1390  }
1391
1392  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1393    assert(N == 1 && "Invalid number of operands!");
1394    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1395                                         ShifterImm.Imm));
1396  }
1397
1398  void addRegListOperands(MCInst &Inst, unsigned N) const {
1399    assert(N == 1 && "Invalid number of operands!");
1400    const SmallVectorImpl<unsigned> &RegList = getRegList();
1401    for (SmallVectorImpl<unsigned>::const_iterator
1402           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1403      Inst.addOperand(MCOperand::CreateReg(*I));
1404  }
1405
1406  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1407    addRegListOperands(Inst, N);
1408  }
1409
1410  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1411    addRegListOperands(Inst, N);
1412  }
1413
1414  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1415    assert(N == 1 && "Invalid number of operands!");
1416    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1417    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1418  }
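  // e.g. (illustrative) a source rotation of "ror #16" is stored here as
  // 16 >> 3 = 2.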
1419
1420  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1421    assert(N == 1 && "Invalid number of operands!");
1422    // Munge the lsb/width into a bitfield mask.
1423    unsigned lsb = Bitfield.LSB;
1424    unsigned width = Bitfield.Width;
1425    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1426    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1427                      (32 - (lsb + width)));
1428    Inst.addOperand(MCOperand::CreateImm(Mask));
1429  }
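  // Illustrative check of the mask expression above: lsb = 8, width = 8
  // yields Mask = 0xffff00ff, i.e. bits [15:8] clear and all others set.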
1430
1431  void addImmOperands(MCInst &Inst, unsigned N) const {
1432    assert(N == 1 && "Invalid number of operands!");
1433    addExpr(Inst, getImm());
1434  }
1435
1436  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1437    assert(N == 1 && "Invalid number of operands!");
1438    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1439    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1440  }
1441
1442  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1443    assert(N == 1 && "Invalid number of operands!");
1444    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1445    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1446  }
1447
1448  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1449    assert(N == 1 && "Invalid number of operands!");
1450    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1451    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1452    Inst.addOperand(MCOperand::CreateImm(Val));
1453  }
1454
1455  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1456    assert(N == 1 && "Invalid number of operands!");
1457    // FIXME: We really want to scale the value here, but the LDRD/STRD
1458    // instructions don't encode operands that way yet.
1459    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1460    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1461  }
1462
1463  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1464    assert(N == 1 && "Invalid number of operands!");
1465    // The immediate is scaled by four in the encoding and is stored
1466    // in the MCInst as such. Lop off the low two bits here.
1467    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1468    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1469  }
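  // e.g. (illustrative) #1020 is stored as 1020 / 4 = 255.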
1470
1471  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1472    assert(N == 1 && "Invalid number of operands!");
1473    // The immediate is scaled by four in the encoding and is stored
1474    // in the MCInst as such. Lop off the low two bits here.
1475    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1476    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1477  }
1478
1479  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1480    assert(N == 1 && "Invalid number of operands!");
1481    // The constant encodes as the immediate minus one, and the MCInst stores
1482    // the bits as encoded, so subtract one here.
1483    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1484    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1485  }
1486
1487  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1488    assert(N == 1 && "Invalid number of operands!");
1489    // The constant encodes as the immediate minus one, and the MCInst stores
1490    // the bits as encoded, so subtract one here.
1491    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1492    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1493  }
1494
1495  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1496    assert(N == 1 && "Invalid number of operands!");
1497    // The constant encodes as the immediate, except for 32, which encodes as
1498    // zero.
1499    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1500    unsigned Imm = CE->getValue();
1501    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1502  }
1503
1504  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1505    assert(N == 1 && "Invalid number of operands!");
1506    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1507    // the instruction as well.
1508    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1509    int Val = CE->getValue();
1510    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1511  }
1512
1513  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1514    assert(N == 1 && "Invalid number of operands!");
1515    // The operand is actually a t2_so_imm, but we have its bitwise
1516    // negation in the assembly source, so twiddle it here.
1517    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1518    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1519  }
1520
1521  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1522    assert(N == 1 && "Invalid number of operands!");
1523    // The operand is actually a t2_so_imm, but we have its
1524    // negation in the assembly source, so twiddle it here.
1525    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1526    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1527  }
1528
1529  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1530    assert(N == 1 && "Invalid number of operands!");
1531    // The operand is actually a so_imm, but we have its bitwise
1532    // negation in the assembly source, so twiddle it here.
1533    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1534    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1535  }
1536
1537  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1538    assert(N == 1 && "Invalid number of operands!");
1539    // The operand is actually a so_imm, but we have its
1540    // negation in the assembly source, so twiddle it here.
1541    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1542    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1543  }
1544
1545  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1546    assert(N == 1 && "Invalid number of operands!");
1547    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1548  }
1549
1550  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1551    assert(N == 1 && "Invalid number of operands!");
1552    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1553  }
1554
1555  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1556    assert(N == 1 && "Invalid number of operands!");
1557    int32_t Imm = Memory.OffsetImm->getValue();
1558    // FIXME: Handle #-0
1559    if (Imm == INT32_MIN) Imm = 0;
1560    Inst.addOperand(MCOperand::CreateImm(Imm));
1561  }
1562
1563  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1564    assert(N == 2 && "Invalid number of operands!");
1565    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1566    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1567  }
1568
1569  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1570    assert(N == 3 && "Invalid number of operands!");
1571    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1572    if (!Memory.OffsetRegNum) {
1573      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1574      // Special case for #-0
1575      if (Val == INT32_MIN) Val = 0;
1576      if (Val < 0) Val = -Val;
1577      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1578    } else {
1579      // For register offset, we encode the shift type and negation flag
1580      // here.
1581      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1582                              Memory.ShiftImm, Memory.ShiftType);
1583    }
1584    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1585    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1586    Inst.addOperand(MCOperand::CreateImm(Val));
1587  }
1588
1589  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1590    assert(N == 2 && "Invalid number of operands!");
1591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1592    assert(CE && "non-constant AM2OffsetImm operand!");
1593    int32_t Val = CE->getValue();
1594    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1595    // Special case for #-0
1596    if (Val == INT32_MIN) Val = 0;
1597    if (Val < 0) Val = -Val;
1598    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1599    Inst.addOperand(MCOperand::CreateReg(0));
1600    Inst.addOperand(MCOperand::CreateImm(Val));
1601  }
1602
1603  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1604    assert(N == 3 && "Invalid number of operands!");
1605    // If we have an immediate that's not a constant, treat it as a label
1606    // reference needing a fixup. If it is a constant, it's something else
1607    // and we reject it.
1608    if (isImm()) {
1609      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1610      Inst.addOperand(MCOperand::CreateReg(0));
1611      Inst.addOperand(MCOperand::CreateImm(0));
1612      return;
1613    }
1614
1615    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1616    if (!Memory.OffsetRegNum) {
1617      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1618      // Special case for #-0
1619      if (Val == INT32_MIN) Val = 0;
1620      if (Val < 0) Val = -Val;
1621      Val = ARM_AM::getAM3Opc(AddSub, Val);
1622    } else {
1623      // For register offset, we encode the shift type and negation flag
1624      // here.
1625      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1626    }
1627    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1628    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1629    Inst.addOperand(MCOperand::CreateImm(Val));
1630  }
1631
1632  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1633    assert(N == 2 && "Invalid number of operands!");
1634    if (Kind == k_PostIndexRegister) {
1635      int32_t Val =
1636        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1637      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1638      Inst.addOperand(MCOperand::CreateImm(Val));
1639      return;
1640    }
1641
1642    // Constant offset.
1643    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1644    int32_t Val = CE->getValue();
1645    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1646    // Special case for #-0
1647    if (Val == INT32_MIN) Val = 0;
1648    if (Val < 0) Val = -Val;
1649    Val = ARM_AM::getAM3Opc(AddSub, Val);
1650    Inst.addOperand(MCOperand::CreateReg(0));
1651    Inst.addOperand(MCOperand::CreateImm(Val));
1652  }
1653
1654  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1655    assert(N == 2 && "Invalid number of operands!");
1656    // If we have an immediate that's not a constant, treat it as a label
1657    // reference needing a fixup. If it is a constant, it's something else
1658    // and we reject it.
1659    if (isImm()) {
1660      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1661      Inst.addOperand(MCOperand::CreateImm(0));
1662      return;
1663    }
1664
1665    // The lower two bits are always zero and as such are not encoded.
1666    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1667    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1668    // Special case for #-0
1669    if (Val == INT32_MIN) Val = 0;
1670    if (Val < 0) Val = -Val;
1671    Val = ARM_AM::getAM5Opc(AddSub, Val);
1672    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1673    Inst.addOperand(MCOperand::CreateImm(Val));
1674  }
1675
1676  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1677    assert(N == 2 && "Invalid number of operands!");
1678    // If we have an immediate that's not a constant, treat it as a label
1679    // reference needing a fixup. If it is a constant, it's something else
1680    // and we reject it.
1681    if (isImm()) {
1682      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1683      Inst.addOperand(MCOperand::CreateImm(0));
1684      return;
1685    }
1686
1687    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1688    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1689    Inst.addOperand(MCOperand::CreateImm(Val));
1690  }
1691
1692  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1693    assert(N == 2 && "Invalid number of operands!");
1694    // The lower two bits are always zero and as such are not encoded.
1695    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1696    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1697    Inst.addOperand(MCOperand::CreateImm(Val));
1698  }
1699
1700  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1701    assert(N == 2 && "Invalid number of operands!");
1702    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1703    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1704    Inst.addOperand(MCOperand::CreateImm(Val));
1705  }
1706
1707  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1708    addMemImm8OffsetOperands(Inst, N);
1709  }
1710
1711  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1712    addMemImm8OffsetOperands(Inst, N);
1713  }
1714
1715  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1716    assert(N == 2 && "Invalid number of operands!");
1717    // If this is an immediate, it's a label reference.
1718    if (isImm()) {
1719      addExpr(Inst, getImm());
1720      Inst.addOperand(MCOperand::CreateImm(0));
1721      return;
1722    }
1723
1724    // Otherwise, it's a normal memory reg+offset.
1725    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1726    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1727    Inst.addOperand(MCOperand::CreateImm(Val));
1728  }
1729
1730  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1731    assert(N == 2 && "Invalid number of operands!");
1732    // If this is an immediate, it's a label reference.
1733    if (isImm()) {
1734      addExpr(Inst, getImm());
1735      Inst.addOperand(MCOperand::CreateImm(0));
1736      return;
1737    }
1738
1739    // Otherwise, it's a normal memory reg+offset.
1740    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1741    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1742    Inst.addOperand(MCOperand::CreateImm(Val));
1743  }
1744
1745  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1746    assert(N == 2 && "Invalid number of operands!");
1747    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1748    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1749  }
1750
1751  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1752    assert(N == 2 && "Invalid number of operands!");
1753    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1754    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1755  }
1756
1757  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1758    assert(N == 3 && "Invalid number of operands!");
1759    unsigned Val =
1760      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1761                        Memory.ShiftImm, Memory.ShiftType);
1762    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1763    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1764    Inst.addOperand(MCOperand::CreateImm(Val));
1765  }
1766
1767  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1768    assert(N == 3 && "Invalid number of operands!");
1769    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1770    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1771    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1772  }
1773
1774  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1775    assert(N == 2 && "Invalid number of operands!");
1776    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1777    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1778  }
1779
1780  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1781    assert(N == 2 && "Invalid number of operands!");
1782    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1783    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1784    Inst.addOperand(MCOperand::CreateImm(Val));
1785  }
1786
1787  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1788    assert(N == 2 && "Invalid number of operands!");
1789    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1790    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1791    Inst.addOperand(MCOperand::CreateImm(Val));
1792  }
1793
1794  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1795    assert(N == 2 && "Invalid number of operands!");
1796    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1797    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1798    Inst.addOperand(MCOperand::CreateImm(Val));
1799  }
1800
1801  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1802    assert(N == 2 && "Invalid number of operands!");
1803    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1804    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1805    Inst.addOperand(MCOperand::CreateImm(Val));
1806  }
1807
1808  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1809    assert(N == 1 && "Invalid number of operands!");
1810    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1811    assert(CE && "non-constant post-idx-imm8 operand!");
1812    int Imm = CE->getValue();
1813    bool isAdd = Imm >= 0;
1814    if (Imm == INT32_MIN) Imm = 0;
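        // Pack as magnitude in the low bits with the add/sub flag in bit 8,
        // e.g. #4 becomes 0x104 and #-4 becomes 0x004.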
1815    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1816    Inst.addOperand(MCOperand::CreateImm(Imm));
1817  }
1818
1819  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1820    assert(N == 1 && "Invalid number of operands!");
1821    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1822    assert(CE && "non-constant post-idx-imm8s4 operand!");
1823    int Imm = CE->getValue();
1824    bool isAdd = Imm >= 0;
1825    if (Imm == INT32_MIN) Imm = 0;
1826    // Immediate is scaled by 4.
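        // e.g. #8 becomes 0x102 and #-8 becomes 0x002.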
1827    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1828    Inst.addOperand(MCOperand::CreateImm(Imm));
1829  }
1830
1831  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1832    assert(N == 2 && "Invalid number of operands!");
1833    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1834    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1835  }
1836
1837  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1838    assert(N == 2 && "Invalid number of operands!");
1839    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1840    // The sign, shift type, and shift amount are encoded in a single operand
1841    // using the AM2 encoding helpers.
1842    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1843    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1844                                     PostIdxReg.ShiftTy);
1845    Inst.addOperand(MCOperand::CreateImm(Imm));
1846  }
1847
1848  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1849    assert(N == 1 && "Invalid number of operands!");
1850    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1851  }
1852
1853  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1854    assert(N == 1 && "Invalid number of operands!");
1855    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1856  }
1857
1858  void addVecListOperands(MCInst &Inst, unsigned N) const {
1859    assert(N == 1 && "Invalid number of operands!");
1860    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1861  }
1862
1863  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1864    assert(N == 2 && "Invalid number of operands!");
1865    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1866    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1867  }
1868
1869  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1870    assert(N == 1 && "Invalid number of operands!");
1871    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1872  }
1873
1874  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1875    assert(N == 1 && "Invalid number of operands!");
1876    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1877  }
1878
1879  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1880    assert(N == 1 && "Invalid number of operands!");
1881    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1882  }
1883
1884  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1885    assert(N == 1 && "Invalid number of operands!");
1886    // The immediate encodes the type of constant as well as the value.
1887    // Mask in that this is an i8 splat.
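        // The value itself sits in the low 8 bits; 0xe00 marks the i8 form.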
1888    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1889    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1890  }
1891
1892  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1893    assert(N == 1 && "Invalid number of operands!");
1894    // The immediate encodes the type of constant as well as the value.
1895    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1896    unsigned Value = CE->getValue();
1897    if (Value >= 256)
1898      Value = (Value >> 8) | 0xa00;
1899    else
1900      Value |= 0x800;
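        // e.g. #0x12 becomes 0x812 and #0x1200 becomes 0xa12.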
1901    Inst.addOperand(MCOperand::CreateImm(Value));
1902  }
1903
1904  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1905    assert(N == 1 && "Invalid number of operands!");
1906    // The immediate encodes the type of constant as well as the value.
1907    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1908    unsigned Value = CE->getValue();
1909    if (Value >= 256 && Value <= 0xff00)
1910      Value = (Value >> 8) | 0x200;
1911    else if (Value > 0xffff && Value <= 0xff0000)
1912      Value = (Value >> 16) | 0x400;
1913    else if (Value > 0xffffff)
1914      Value = (Value >> 24) | 0x600;
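        // e.g. #0x12 stays 0x12, #0x1200 becomes 0x212, #0x120000 becomes 0x412,
        // and #0x12000000 becomes 0x612.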
1915    Inst.addOperand(MCOperand::CreateImm(Value));
1916  }
1917
1918  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1919    assert(N == 1 && "Invalid number of operands!");
1920    // The immediate encodes the type of constant as well as the value.
1921    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1922    unsigned Value = CE->getValue();
1923    if (Value >= 256 && Value <= 0xffff)
1924      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1925    else if (Value > 0xffff && Value <= 0xffffff)
1926      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1927    else if (Value > 0xffffff)
1928      Value = (Value >> 24) | 0x600;
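        // e.g. #0x1200 becomes 0x212, while #0x12ff (low byte all ones)
        // becomes 0xc12.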
1929    Inst.addOperand(MCOperand::CreateImm(Value));
1930  }
1931
1932  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1933    assert(N == 1 && "Invalid number of operands!");
1934    // The immediate encodes the type of constant as well as the value.
1935    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1936    unsigned Value = ~CE->getValue();
1937    if (Value >= 256 && Value <= 0xffff)
1938      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1939    else if (Value > 0xffff && Value <= 0xffffff)
1940      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1941    else if (Value > 0xffffff)
1942      Value = (Value >> 24) | 0x600;
1943    Inst.addOperand(MCOperand::CreateImm(Value));
1944  }
1945
1946  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1947    assert(N == 1 && "Invalid number of operands!");
1948    // The immediate encodes the type of constant as well as the value.
1949    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1950    uint64_t Value = CE->getValue();
1951    unsigned Imm = 0;
1952    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1953      Imm |= (Value & 1) << i;
1954    }
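        // e.g. #0xff00ff0000ff00ff collapses to 0xa5 before the 0x1e00 type
        // bits are ORed in.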
1955    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1956  }
1957
1958  virtual void print(raw_ostream &OS) const;
1959
1960  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1961    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1962    Op->ITMask.Mask = Mask;
1963    Op->StartLoc = S;
1964    Op->EndLoc = S;
1965    return Op;
1966  }
1967
1968  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1969    ARMOperand *Op = new ARMOperand(k_CondCode);
1970    Op->CC.Val = CC;
1971    Op->StartLoc = S;
1972    Op->EndLoc = S;
1973    return Op;
1974  }
1975
1976  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1977    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1978    Op->Cop.Val = CopVal;
1979    Op->StartLoc = S;
1980    Op->EndLoc = S;
1981    return Op;
1982  }
1983
1984  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1985    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1986    Op->Cop.Val = CopVal;
1987    Op->StartLoc = S;
1988    Op->EndLoc = S;
1989    return Op;
1990  }
1991
1992  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1993    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1994    Op->Cop.Val = Val;
1995    Op->StartLoc = S;
1996    Op->EndLoc = E;
1997    return Op;
1998  }
1999
2000  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2001    ARMOperand *Op = new ARMOperand(k_CCOut);
2002    Op->Reg.RegNum = RegNum;
2003    Op->StartLoc = S;
2004    Op->EndLoc = S;
2005    return Op;
2006  }
2007
2008  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2009    ARMOperand *Op = new ARMOperand(k_Token);
2010    Op->Tok.Data = Str.data();
2011    Op->Tok.Length = Str.size();
2012    Op->StartLoc = S;
2013    Op->EndLoc = S;
2014    return Op;
2015  }
2016
2017  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2018    ARMOperand *Op = new ARMOperand(k_Register);
2019    Op->Reg.RegNum = RegNum;
2020    Op->StartLoc = S;
2021    Op->EndLoc = E;
2022    return Op;
2023  }
2024
2025  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2026                                           unsigned SrcReg,
2027                                           unsigned ShiftReg,
2028                                           unsigned ShiftImm,
2029                                           SMLoc S, SMLoc E) {
2030    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2031    Op->RegShiftedReg.ShiftTy = ShTy;
2032    Op->RegShiftedReg.SrcReg = SrcReg;
2033    Op->RegShiftedReg.ShiftReg = ShiftReg;
2034    Op->RegShiftedReg.ShiftImm = ShiftImm;
2035    Op->StartLoc = S;
2036    Op->EndLoc = E;
2037    return Op;
2038  }
2039
2040  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2041                                            unsigned SrcReg,
2042                                            unsigned ShiftImm,
2043                                            SMLoc S, SMLoc E) {
2044    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2045    Op->RegShiftedImm.ShiftTy = ShTy;
2046    Op->RegShiftedImm.SrcReg = SrcReg;
2047    Op->RegShiftedImm.ShiftImm = ShiftImm;
2048    Op->StartLoc = S;
2049    Op->EndLoc = E;
2050    return Op;
2051  }
2052
2053  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2054                                   SMLoc S, SMLoc E) {
2055    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2056    Op->ShifterImm.isASR = isASR;
2057    Op->ShifterImm.Imm = Imm;
2058    Op->StartLoc = S;
2059    Op->EndLoc = E;
2060    return Op;
2061  }
2062
2063  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2064    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2065    Op->RotImm.Imm = Imm;
2066    Op->StartLoc = S;
2067    Op->EndLoc = E;
2068    return Op;
2069  }
2070
2071  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2072                                    SMLoc S, SMLoc E) {
2073    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2074    Op->Bitfield.LSB = LSB;
2075    Op->Bitfield.Width = Width;
2076    Op->StartLoc = S;
2077    Op->EndLoc = E;
2078    return Op;
2079  }
2080
2081  static ARMOperand *
2082  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2083                SMLoc StartLoc, SMLoc EndLoc) {
2084    KindTy Kind = k_RegisterList;
2085
2086    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2087      Kind = k_DPRRegisterList;
2088    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2089             contains(Regs.front().first))
2090      Kind = k_SPRRegisterList;
2091
2092    ARMOperand *Op = new ARMOperand(Kind);
2093    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2094           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2095      Op->Registers.push_back(I->first);
2096    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2097    Op->StartLoc = StartLoc;
2098    Op->EndLoc = EndLoc;
2099    return Op;
2100  }
2101
2102  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2103                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2104    ARMOperand *Op = new ARMOperand(k_VectorList);
2105    Op->VectorList.RegNum = RegNum;
2106    Op->VectorList.Count = Count;
2107    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2108    Op->StartLoc = S;
2109    Op->EndLoc = E;
2110    return Op;
2111  }
2112
2113  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2114                                              bool isDoubleSpaced,
2115                                              SMLoc S, SMLoc E) {
2116    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2117    Op->VectorList.RegNum = RegNum;
2118    Op->VectorList.Count = Count;
2119    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2120    Op->StartLoc = S;
2121    Op->EndLoc = E;
2122    return Op;
2123  }
2124
2125  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2126                                             unsigned Index,
2127                                             bool isDoubleSpaced,
2128                                             SMLoc S, SMLoc E) {
2129    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2130    Op->VectorList.RegNum = RegNum;
2131    Op->VectorList.Count = Count;
2132    Op->VectorList.LaneIndex = Index;
2133    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2134    Op->StartLoc = S;
2135    Op->EndLoc = E;
2136    return Op;
2137  }
2138
2139  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2140                                       MCContext &Ctx) {
2141    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2142    Op->VectorIndex.Val = Idx;
2143    Op->StartLoc = S;
2144    Op->EndLoc = E;
2145    return Op;
2146  }
2147
2148  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2149    ARMOperand *Op = new ARMOperand(k_Immediate);
2150    Op->Imm.Val = Val;
2151    Op->StartLoc = S;
2152    Op->EndLoc = E;
2153    return Op;
2154  }
2155
2156  static ARMOperand *CreateMem(unsigned BaseRegNum,
2157                               const MCConstantExpr *OffsetImm,
2158                               unsigned OffsetRegNum,
2159                               ARM_AM::ShiftOpc ShiftType,
2160                               unsigned ShiftImm,
2161                               unsigned Alignment,
2162                               bool isNegative,
2163                               SMLoc S, SMLoc E) {
2164    ARMOperand *Op = new ARMOperand(k_Memory);
2165    Op->Memory.BaseRegNum = BaseRegNum;
2166    Op->Memory.OffsetImm = OffsetImm;
2167    Op->Memory.OffsetRegNum = OffsetRegNum;
2168    Op->Memory.ShiftType = ShiftType;
2169    Op->Memory.ShiftImm = ShiftImm;
2170    Op->Memory.Alignment = Alignment;
2171    Op->Memory.isNegative = isNegative;
2172    Op->StartLoc = S;
2173    Op->EndLoc = E;
2174    return Op;
2175  }
2176
2177  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2178                                      ARM_AM::ShiftOpc ShiftTy,
2179                                      unsigned ShiftImm,
2180                                      SMLoc S, SMLoc E) {
2181    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2182    Op->PostIdxReg.RegNum = RegNum;
2183    Op->PostIdxReg.isAdd = isAdd;
2184    Op->PostIdxReg.ShiftTy = ShiftTy;
2185    Op->PostIdxReg.ShiftImm = ShiftImm;
2186    Op->StartLoc = S;
2187    Op->EndLoc = E;
2188    return Op;
2189  }
2190
2191  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2192    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2193    Op->MBOpt.Val = Opt;
2194    Op->StartLoc = S;
2195    Op->EndLoc = S;
2196    return Op;
2197  }
2198
2199  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2200    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2201    Op->IFlags.Val = IFlags;
2202    Op->StartLoc = S;
2203    Op->EndLoc = S;
2204    return Op;
2205  }
2206
2207  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2208    ARMOperand *Op = new ARMOperand(k_MSRMask);
2209    Op->MMask.Val = MMask;
2210    Op->StartLoc = S;
2211    Op->EndLoc = S;
2212    return Op;
2213  }
2214};
2215
2216} // end anonymous namespace.
2217
2218void ARMOperand::print(raw_ostream &OS) const {
2219  switch (Kind) {
2220  case k_CondCode:
2221    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2222    break;
2223  case k_CCOut:
2224    OS << "<ccout " << getReg() << ">";
2225    break;
2226  case k_ITCondMask: {
2227    static const char *MaskStr[] = {
2228      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2229      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2230    };
2231    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2232    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2233    break;
2234  }
2235  case k_CoprocNum:
2236    OS << "<coprocessor number: " << getCoproc() << ">";
2237    break;
2238  case k_CoprocReg:
2239    OS << "<coprocessor register: " << getCoproc() << ">";
2240    break;
2241  case k_CoprocOption:
2242    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2243    break;
2244  case k_MSRMask:
2245    OS << "<mask: " << getMSRMask() << ">";
2246    break;
2247  case k_Immediate:
2248    getImm()->print(OS);
2249    break;
2250  case k_MemBarrierOpt:
2251    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2252    break;
2253  case k_Memory:
2254    OS << "<memory "
2255       << " base:" << Memory.BaseRegNum;
2256    OS << ">";
2257    break;
2258  case k_PostIndexRegister:
2259    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2260       << PostIdxReg.RegNum;
2261    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2262      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2263         << PostIdxReg.ShiftImm;
2264    OS << ">";
2265    break;
2266  case k_ProcIFlags: {
2267    OS << "<ARM_PROC::";
2268    unsigned IFlags = getProcIFlags();
2269    for (int i=2; i >= 0; --i)
2270      if (IFlags & (1 << i))
2271        OS << ARM_PROC::IFlagsToString(1 << i);
2272    OS << ">";
2273    break;
2274  }
2275  case k_Register:
2276    OS << "<register " << getReg() << ">";
2277    break;
2278  case k_ShifterImmediate:
2279    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2280       << " #" << ShifterImm.Imm << ">";
2281    break;
2282  case k_ShiftedRegister:
2283    OS << "<so_reg_reg "
2284       << RegShiftedReg.SrcReg << " "
2285       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2286       << " " << RegShiftedReg.ShiftReg << ">";
2287    break;
2288  case k_ShiftedImmediate:
2289    OS << "<so_reg_imm "
2290       << RegShiftedImm.SrcReg << " "
2291       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2292       << " #" << RegShiftedImm.ShiftImm << ">";
2293    break;
2294  case k_RotateImmediate:
2295    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2296    break;
2297  case k_BitfieldDescriptor:
2298    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2299       << ", width: " << Bitfield.Width << ">";
2300    break;
2301  case k_RegisterList:
2302  case k_DPRRegisterList:
2303  case k_SPRRegisterList: {
2304    OS << "<register_list ";
2305
2306    const SmallVectorImpl<unsigned> &RegList = getRegList();
2307    for (SmallVectorImpl<unsigned>::const_iterator
2308           I = RegList.begin(), E = RegList.end(); I != E; ) {
2309      OS << *I;
2310      if (++I < E) OS << ", ";
2311    }
2312
2313    OS << ">";
2314    break;
2315  }
2316  case k_VectorList:
2317    OS << "<vector_list " << VectorList.Count << " * "
2318       << VectorList.RegNum << ">";
2319    break;
2320  case k_VectorListAllLanes:
2321    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2322       << VectorList.RegNum << ">";
2323    break;
2324  case k_VectorListIndexed:
2325    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2326       << VectorList.Count << " * " << VectorList.RegNum << ">";
2327    break;
2328  case k_Token:
2329    OS << "'" << getToken() << "'";
2330    break;
2331  case k_VectorIndex:
2332    OS << "<vectorindex " << getVectorIndex() << ">";
2333    break;
2334  }
2335}
2336
2337/// @name Auto-generated Match Functions
2338/// {
2339
2340static unsigned MatchRegisterName(StringRef Name);
2341
2342/// }
2343
2344bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2345                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2346  StartLoc = Parser.getTok().getLoc();
2347  RegNo = tryParseRegister();
2348  EndLoc = Parser.getTok().getLoc();
2349
2350  return (RegNo == (unsigned)-1);
2351}
2352
2353/// Try to parse a register name.  The token must be an Identifier when called,
2354/// and if it is a register name the token is eaten and the register number is
2355/// returned.  Otherwise return -1.
2356///
2357int ARMAsmParser::tryParseRegister() {
2358  const AsmToken &Tok = Parser.getTok();
2359  if (Tok.isNot(AsmToken::Identifier)) return -1;
2360
2361  std::string lowerCase = Tok.getString().lower();
2362  unsigned RegNum = MatchRegisterName(lowerCase);
2363  if (!RegNum) {
2364    RegNum = StringSwitch<unsigned>(lowerCase)
2365      .Case("r13", ARM::SP)
2366      .Case("r14", ARM::LR)
2367      .Case("r15", ARM::PC)
2368      .Case("ip", ARM::R12)
2369      // Additional register name aliases for 'gas' compatibility.
2370      .Case("a1", ARM::R0)
2371      .Case("a2", ARM::R1)
2372      .Case("a3", ARM::R2)
2373      .Case("a4", ARM::R3)
2374      .Case("v1", ARM::R4)
2375      .Case("v2", ARM::R5)
2376      .Case("v3", ARM::R6)
2377      .Case("v4", ARM::R7)
2378      .Case("v5", ARM::R8)
2379      .Case("v6", ARM::R9)
2380      .Case("v7", ARM::R10)
2381      .Case("v8", ARM::R11)
2382      .Case("sb", ARM::R9)
2383      .Case("sl", ARM::R10)
2384      .Case("fp", ARM::R11)
2385      .Default(0);
2386  }
2387  if (!RegNum) {
2388    // Check for aliases registered via .req. Canonicalize to lower case.
2389    // That's more consistent since register names are case insensitive, and
2390    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2391    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2392    // If no match, return failure.
2393    if (Entry == RegisterReqs.end())
2394      return -1;
2395    Parser.Lex(); // Eat identifier token.
2396    return Entry->getValue();
2397  }
2398
2399  Parser.Lex(); // Eat identifier token.
2400
2401  return RegNum;
2402}
2403
2404// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2405// If a recoverable error occurs, return 1. If an irrecoverable error
2406// occurs, return -1. An irrecoverable error is one where tokens have been
2407// consumed in the process of trying to parse the shifter (i.e., when it is
2408// indeed a shifter operand, but malformed).
2409int ARMAsmParser::tryParseShiftRegister(
2410                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2411  SMLoc S = Parser.getTok().getLoc();
2412  const AsmToken &Tok = Parser.getTok();
2413  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2414
2415  std::string lowerCase = Tok.getString().lower();
2416  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2417      .Case("asl", ARM_AM::lsl)
2418      .Case("lsl", ARM_AM::lsl)
2419      .Case("lsr", ARM_AM::lsr)
2420      .Case("asr", ARM_AM::asr)
2421      .Case("ror", ARM_AM::ror)
2422      .Case("rrx", ARM_AM::rrx)
2423      .Default(ARM_AM::no_shift);
2424
2425  if (ShiftTy == ARM_AM::no_shift)
2426    return 1;
2427
2428  Parser.Lex(); // Eat the operator.
2429
2430  // The source register for the shift has already been added to the
2431  // operand list, so we need to pop it off and combine it into the shifted
2432  // register operand instead.
2433  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2434  if (!PrevOp->isReg())
2435    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2436  int SrcReg = PrevOp->getReg();
2437  int64_t Imm = 0;
2438  int ShiftReg = 0;
2439  if (ShiftTy == ARM_AM::rrx) {
2440    // RRX doesn't have an explicit shift amount. The encoder expects
2441    // the shift register to be the same as the source register. Seems odd,
2442    // but OK.
2443    ShiftReg = SrcReg;
2444  } else {
2445    // Figure out if this is shifted by a constant or a register (for non-RRX).
2446    if (Parser.getTok().is(AsmToken::Hash) ||
2447        Parser.getTok().is(AsmToken::Dollar)) {
2448      Parser.Lex(); // Eat hash.
2449      SMLoc ImmLoc = Parser.getTok().getLoc();
2450      const MCExpr *ShiftExpr = 0;
2451      if (getParser().ParseExpression(ShiftExpr)) {
2452        Error(ImmLoc, "invalid immediate shift value");
2453        return -1;
2454      }
2455      // The expression must be evaluatable as an immediate.
2456      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2457      if (!CE) {
2458        Error(ImmLoc, "invalid immediate shift value");
2459        return -1;
2460      }
2461      // Range check the immediate.
2462      // lsl, ror: 0 <= imm <= 31
2463      // lsr, asr: 0 <= imm <= 32
2464      Imm = CE->getValue();
2465      if (Imm < 0 ||
2466          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2467          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2468        Error(ImmLoc, "immediate shift value out of range");
2469        return -1;
2470      }
2471      // A shift by zero is a nop. Always send it through as lsl.
2472      // ('as' compatibility)
2473      if (Imm == 0)
2474        ShiftTy = ARM_AM::lsl;
2475    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2476      ShiftReg = tryParseRegister();
2477      SMLoc L = Parser.getTok().getLoc();
2478      if (ShiftReg == -1) {
2479        Error(L, "expected immediate or register in shift operand");
2480        return -1;
2481      }
2482    } else {
2483      Error(Parser.getTok().getLoc(),
2484            "expected immediate or register in shift operand");
2485      return -1;
2486    }
2487  }
2488
2489  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2490    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2491                                                         ShiftReg, Imm,
2492                                               S, Parser.getTok().getLoc()));
2493  else
2494    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2495                                               S, Parser.getTok().getLoc()));
2496
2497  return 0;
2498}
2499
2500
2501/// Try to parse a register name.  The token must be an Identifier when called.
2502/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2503/// if there is a "writeback". Returns 'true' if it's not a register.
2504///
2505/// TODO this is likely to change to allow different register types and or to
2506/// parse for a specific register type.
2507bool ARMAsmParser::
2508tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2509  SMLoc S = Parser.getTok().getLoc();
2510  int RegNo = tryParseRegister();
2511  if (RegNo == -1)
2512    return true;
2513
2514  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2515
2516  const AsmToken &ExclaimTok = Parser.getTok();
2517  if (ExclaimTok.is(AsmToken::Exclaim)) {
2518    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2519                                               ExclaimTok.getLoc()));
2520    Parser.Lex(); // Eat exclaim token
2521    return false;
2522  }
2523
2524  // Also check for an index operand. This is only legal for vector registers,
2525  // but that'll get caught OK in operand matching, so we don't need to
2526  // explicitly filter everything else out here.
2527  if (Parser.getTok().is(AsmToken::LBrac)) {
2528    SMLoc SIdx = Parser.getTok().getLoc();
2529    Parser.Lex(); // Eat left bracket token.
2530
2531    const MCExpr *ImmVal;
2532    if (getParser().ParseExpression(ImmVal))
2533      return true;
2534    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2535    if (!MCE) {
2536      TokError("immediate value expected for vector index");
2537      return true;
2538    }
2539
2540    SMLoc E = Parser.getTok().getLoc();
2541    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2542      Error(E, "']' expected");
2543      return true;
2544    }
2545
2546    Parser.Lex(); // Eat right bracket token.
2547
2548    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2549                                                     SIdx, E,
2550                                                     getContext()));
2551  }
2552
2553  return false;
2554}
2555
2556/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2557/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2558/// "c5", ...
2559static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2560  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2561  // but efficient.
2562  switch (Name.size()) {
2563  default: return -1;
2564  case 2:
2565    if (Name[0] != CoprocOp)
2566      return -1;
2567    switch (Name[1]) {
2568    default:  return -1;
2569    case '0': return 0;
2570    case '1': return 1;
2571    case '2': return 2;
2572    case '3': return 3;
2573    case '4': return 4;
2574    case '5': return 5;
2575    case '6': return 6;
2576    case '7': return 7;
2577    case '8': return 8;
2578    case '9': return 9;
2579    }
2580  case 3:
2581    if (Name[0] != CoprocOp || Name[1] != '1')
2582      return -1;
2583    switch (Name[2]) {
2584    default:  return -1;
2585    case '0': return 10;
2586    case '1': return 11;
2587    case '2': return 12;
2588    case '3': return 13;
2589    case '4': return 14;
2590    case '5': return 15;
2591    }
2592  }
2593}
2594
2595/// parseITCondCode - Try to parse a condition code for an IT instruction.
2596ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2597parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2598  SMLoc S = Parser.getTok().getLoc();
2599  const AsmToken &Tok = Parser.getTok();
2600  if (!Tok.is(AsmToken::Identifier))
2601    return MatchOperand_NoMatch;
2602  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2603    .Case("eq", ARMCC::EQ)
2604    .Case("ne", ARMCC::NE)
2605    .Case("hs", ARMCC::HS)
2606    .Case("cs", ARMCC::HS)
2607    .Case("lo", ARMCC::LO)
2608    .Case("cc", ARMCC::LO)
2609    .Case("mi", ARMCC::MI)
2610    .Case("pl", ARMCC::PL)
2611    .Case("vs", ARMCC::VS)
2612    .Case("vc", ARMCC::VC)
2613    .Case("hi", ARMCC::HI)
2614    .Case("ls", ARMCC::LS)
2615    .Case("ge", ARMCC::GE)
2616    .Case("lt", ARMCC::LT)
2617    .Case("gt", ARMCC::GT)
2618    .Case("le", ARMCC::LE)
2619    .Case("al", ARMCC::AL)
2620    .Default(~0U);
2621  if (CC == ~0U)
2622    return MatchOperand_NoMatch;
2623  Parser.Lex(); // Eat the token.
2624
2625  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2626
2627  return MatchOperand_Success;
2628}
2629
2630/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2631/// token must be an Identifier when called, and if it is a coprocessor
2632/// number, the token is eaten and the operand is added to the operand list.
2633ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2634parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2635  SMLoc S = Parser.getTok().getLoc();
2636  const AsmToken &Tok = Parser.getTok();
2637  if (Tok.isNot(AsmToken::Identifier))
2638    return MatchOperand_NoMatch;
2639
2640  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2641  if (Num == -1)
2642    return MatchOperand_NoMatch;
2643
2644  Parser.Lex(); // Eat identifier token.
2645  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2646  return MatchOperand_Success;
2647}
2648
2649/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2650/// token must be an Identifier when called, and if it is a coprocessor
2651/// register, the token is eaten and the operand is added to the operand list.
2652ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2653parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2654  SMLoc S = Parser.getTok().getLoc();
2655  const AsmToken &Tok = Parser.getTok();
2656  if (Tok.isNot(AsmToken::Identifier))
2657    return MatchOperand_NoMatch;
2658
2659  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2660  if (Reg == -1)
2661    return MatchOperand_NoMatch;
2662
2663  Parser.Lex(); // Eat identifier token.
2664  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2665  return MatchOperand_Success;
2666}
2667
2668/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2669/// coproc_option : '{' imm0_255 '}'
2670ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2671parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2672  SMLoc S = Parser.getTok().getLoc();
2673
2674  // If this isn't a '{', this isn't a coprocessor immediate operand.
2675  if (Parser.getTok().isNot(AsmToken::LCurly))
2676    return MatchOperand_NoMatch;
2677  Parser.Lex(); // Eat the '{'
2678
2679  const MCExpr *Expr;
2680  SMLoc Loc = Parser.getTok().getLoc();
2681  if (getParser().ParseExpression(Expr)) {
2682    Error(Loc, "illegal expression");
2683    return MatchOperand_ParseFail;
2684  }
2685  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2686  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2687    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2688    return MatchOperand_ParseFail;
2689  }
2690  int Val = CE->getValue();
2691
2692  // Check for and consume the closing '}'
2693  if (Parser.getTok().isNot(AsmToken::RCurly))
2694    return MatchOperand_ParseFail;
2695  SMLoc E = Parser.getTok().getLoc();
2696  Parser.Lex(); // Eat the '}'
2697
2698  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2699  return MatchOperand_Success;
2700}
2701
2702// For register list parsing, we need to map from raw GPR register numbering
2703// to the enumeration values. The enumeration values aren't sorted by
2704// register number due to our using "sp", "lr" and "pc" as canonical names.
2705static unsigned getNextRegister(unsigned Reg) {
2706  // If this is a GPR, we need to do it manually, otherwise we can rely
2707  // on the sort ordering of the enumeration since the other reg-classes
2708  // are sane.
2709  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2710    return Reg + 1;
2711  switch(Reg) {
2712  default: llvm_unreachable("Invalid GPR number!");
2713  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2714  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2715  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2716  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2717  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2718  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2719  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2720  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2721  }
2722}
2723
2724// Return the low-subreg of a given Q register.
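    // Qn overlaps D(2n) and D(2n+1); this returns D(2n).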
2725static unsigned getDRegFromQReg(unsigned QReg) {
2726  switch (QReg) {
2727  default: llvm_unreachable("expected a Q register!");
2728  case ARM::Q0:  return ARM::D0;
2729  case ARM::Q1:  return ARM::D2;
2730  case ARM::Q2:  return ARM::D4;
2731  case ARM::Q3:  return ARM::D6;
2732  case ARM::Q4:  return ARM::D8;
2733  case ARM::Q5:  return ARM::D10;
2734  case ARM::Q6:  return ARM::D12;
2735  case ARM::Q7:  return ARM::D14;
2736  case ARM::Q8:  return ARM::D16;
2737  case ARM::Q9:  return ARM::D18;
2738  case ARM::Q10: return ARM::D20;
2739  case ARM::Q11: return ARM::D22;
2740  case ARM::Q12: return ARM::D24;
2741  case ARM::Q13: return ARM::D26;
2742  case ARM::Q14: return ARM::D28;
2743  case ARM::Q15: return ARM::D30;
2744  }
2745}
2746
2747/// Parse a register list.
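    /// e.g. {r0, r1, r4-r7, lr} or {d0-d3}; a Q register expands to its two
    /// D sub-registers.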
2748bool ARMAsmParser::
2749parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2750  assert(Parser.getTok().is(AsmToken::LCurly) &&
2751         "Token is not a Left Curly Brace");
2752  SMLoc S = Parser.getTok().getLoc();
2753  Parser.Lex(); // Eat '{' token.
2754  SMLoc RegLoc = Parser.getTok().getLoc();
2755
2756  // Check the first register in the list to see what register class
2757  // this is a list of.
2758  int Reg = tryParseRegister();
2759  if (Reg == -1)
2760    return Error(RegLoc, "register expected");
2761
2762  // The reglist instructions have at most 16 registers, so reserve
2763  // space for that many.
2764  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2765
2766  // Allow Q regs and just interpret them as the two D sub-registers.
2767  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2768    Reg = getDRegFromQReg(Reg);
2769    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2770    ++Reg;
2771  }
2772  const MCRegisterClass *RC;
2773  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2774    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2775  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2776    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2777  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2778    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2779  else
2780    return Error(RegLoc, "invalid register in register list");
2781
2782  // Store the register.
2783  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2784
2785  // This starts immediately after the first register token in the list,
2786  // so we can see either a comma or a minus (range separator) as a legal
2787  // next token.
2788  while (Parser.getTok().is(AsmToken::Comma) ||
2789         Parser.getTok().is(AsmToken::Minus)) {
2790    if (Parser.getTok().is(AsmToken::Minus)) {
2791      Parser.Lex(); // Eat the minus.
2792      SMLoc EndLoc = Parser.getTok().getLoc();
2793      int EndReg = tryParseRegister();
2794      if (EndReg == -1)
2795        return Error(EndLoc, "register expected");
2796      // Allow Q regs and just interpret them as the two D sub-registers.
2797      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2798        EndReg = getDRegFromQReg(EndReg) + 1;
2799      // If the register is the same as the start reg, there's nothing
2800      // more to do.
2801      if (Reg == EndReg)
2802        continue;
2803      // The register must be in the same register class as the first.
2804      if (!RC->contains(EndReg))
2805        return Error(EndLoc, "invalid register in register list");
2806      // Ranges must go from low to high.
2807      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2808        return Error(EndLoc, "bad range in register list");
2809
2810      // Add all the registers in the range to the register list.
2811      while (Reg != EndReg) {
2812        Reg = getNextRegister(Reg);
2813        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2814      }
2815      continue;
2816    }
2817    Parser.Lex(); // Eat the comma.
2818    RegLoc = Parser.getTok().getLoc();
2819    int OldReg = Reg;
2820    const AsmToken RegTok = Parser.getTok();
2821    Reg = tryParseRegister();
2822    if (Reg == -1)
2823      return Error(RegLoc, "register expected");
2824    // Allow Q regs and just interpret them as the two D sub-registers.
2825    bool isQReg = false;
2826    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2827      Reg = getDRegFromQReg(Reg);
2828      isQReg = true;
2829    }
2830    // The register must be in the same register class as the first.
2831    if (!RC->contains(Reg))
2832      return Error(RegLoc, "invalid register in register list");
2833    // List must be monotonically increasing.
2834    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2835      return Error(RegLoc, "register list not in ascending order");
2836    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2837      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2838              ") in register list");
2839      continue;
2840    }
2841    // VFP register lists must also be contiguous.
2842    // It's OK to use the enumeration values directly here, as the
2843    // VFP register classes have the enum sorted properly.
2844    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2845        Reg != OldReg + 1)
2846      return Error(RegLoc, "non-contiguous register range");
2847    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2848    if (isQReg)
2849      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2850  }
2851
2852  SMLoc E = Parser.getTok().getLoc();
2853  if (Parser.getTok().isNot(AsmToken::RCurly))
2854    return Error(E, "'}' expected");
2855  Parser.Lex(); // Eat '}' token.
2856
2857  // Push the register list operand.
2858  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2859
2860  // The ARM system instruction variants for LDM/STM have a '^' token here.
2861  if (Parser.getTok().is(AsmToken::Caret)) {
2862    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2863    Parser.Lex(); // Eat '^' token.
2864  }
2865
2866  return false;
2867}
2868
2869// Helper function to parse the lane index for vector lists.
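    // Distinguishes a bare register (NoLanes), 'Dn[]' (AllLanes), and 'Dn[x]'
    // (IndexedLane).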
2870ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2871parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2872  Index = 0; // Always return a defined index value.
2873  if (Parser.getTok().is(AsmToken::LBrac)) {
2874    Parser.Lex(); // Eat the '['.
2875    if (Parser.getTok().is(AsmToken::RBrac)) {
2876      // "Dn[]" is the 'all lanes' syntax.
2877      LaneKind = AllLanes;
2878      Parser.Lex(); // Eat the ']'.
2879      return MatchOperand_Success;
2880    }
2881    const MCExpr *LaneIndex;
2882    SMLoc Loc = Parser.getTok().getLoc();
2883    if (getParser().ParseExpression(LaneIndex)) {
2884      Error(Loc, "illegal expression");
2885      return MatchOperand_ParseFail;
2886    }
2887    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2888    if (!CE) {
2889      Error(Loc, "lane index must be empty or an integer");
2890      return MatchOperand_ParseFail;
2891    }
2892    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2893      Error(Parser.getTok().getLoc(), "']' expected");
2894      return MatchOperand_ParseFail;
2895    }
2896    Parser.Lex(); // Eat the ']'.
2897    int64_t Val = CE->getValue();
2898
2899    // FIXME: Make this range check context sensitive for .8, .16, .32.
2900    if (Val < 0 || Val > 7) {
2901      Error(Parser.getTok().getLoc(), "lane index out of range");
2902      return MatchOperand_ParseFail;
2903    }
2904    Index = Val;
2905    LaneKind = IndexedLane;
2906    return MatchOperand_Success;
2907  }
2908  LaneKind = NoLanes;
2909  return MatchOperand_Success;
2910}
2911
2912// Parse a vector register list.
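// Illustrative examples (assumed, not exhaustive) of the syntax accepted here:
//   {d0, d1, d2}    single-spaced list
//   {d0, d2, d4}    double-spaced list
//   {d0-d3}         register range
//   {q0}            Q reg, treated as its two D sub-registers
//   {d0[], d1[]}    all-lanes suffix
//   {d0[1]}         indexed-lane suffix
//   d0              bare register, accepted as {d0} (gas extension)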
2913ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2914parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2915  VectorLaneTy LaneKind;
2916  unsigned LaneIndex;
2917  SMLoc S = Parser.getTok().getLoc();
2918  // As an extension (to match gas), support a plain D register or Q register
2919// (without enclosing curly braces) as a single- or double-entry list,
2920  // respectively.
2921  if (Parser.getTok().is(AsmToken::Identifier)) {
2922    int Reg = tryParseRegister();
2923    if (Reg == -1)
2924      return MatchOperand_NoMatch;
2925    SMLoc E = Parser.getTok().getLoc();
2926    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2927      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2928      if (Res != MatchOperand_Success)
2929        return Res;
2930      switch (LaneKind) {
2931      case NoLanes:
2932        E = Parser.getTok().getLoc();
2933        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2934        break;
2935      case AllLanes:
2936        E = Parser.getTok().getLoc();
2937        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2938                                                                S, E));
2939        break;
2940      case IndexedLane:
2941        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2942                                                               LaneIndex,
2943                                                               false, S, E));
2944        break;
2945      }
2946      return MatchOperand_Success;
2947    }
2948    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2949      Reg = getDRegFromQReg(Reg);
2950      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2951      if (Res != MatchOperand_Success)
2952        return Res;
2953      switch (LaneKind) {
2954      case NoLanes:
2955        E = Parser.getTok().getLoc();
2956        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2957        break;
2958      case AllLanes:
2959        E = Parser.getTok().getLoc();
2960        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2961                                                                S, E));
2962        break;
2963      case IndexedLane:
2964        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2965                                                               LaneIndex,
2966                                                               false, S, E));
2967        break;
2968      }
2969      return MatchOperand_Success;
2970    }
2971    Error(S, "vector register expected");
2972    return MatchOperand_ParseFail;
2973  }
2974
2975  if (Parser.getTok().isNot(AsmToken::LCurly))
2976    return MatchOperand_NoMatch;
2977
2978  Parser.Lex(); // Eat '{' token.
2979  SMLoc RegLoc = Parser.getTok().getLoc();
2980
2981  int Reg = tryParseRegister();
2982  if (Reg == -1) {
2983    Error(RegLoc, "register expected");
2984    return MatchOperand_ParseFail;
2985  }
2986  unsigned Count = 1;
2987  int Spacing = 0;
2988  unsigned FirstReg = Reg;
2989  // The list is of D registers, but we also allow Q regs and just interpret
2990  // them as the two D sub-registers.
2991  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2992    FirstReg = Reg = getDRegFromQReg(Reg);
2993    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2994                 // it's ambiguous with four-register single spaced.
2995    ++Reg;
2996    ++Count;
2997  }
2998  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2999    return MatchOperand_ParseFail;
3000
3001  while (Parser.getTok().is(AsmToken::Comma) ||
3002         Parser.getTok().is(AsmToken::Minus)) {
3003    if (Parser.getTok().is(AsmToken::Minus)) {
3004      if (!Spacing)
3005        Spacing = 1; // Register range implies a single spaced list.
3006      else if (Spacing == 2) {
3007        Error(Parser.getTok().getLoc(),
3008              "sequential registers in double spaced list");
3009        return MatchOperand_ParseFail;
3010      }
3011      Parser.Lex(); // Eat the minus.
3012      SMLoc EndLoc = Parser.getTok().getLoc();
3013      int EndReg = tryParseRegister();
3014      if (EndReg == -1) {
3015        Error(EndLoc, "register expected");
3016        return MatchOperand_ParseFail;
3017      }
3018      // Allow Q regs and just interpret them as the two D sub-registers.
3019      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3020        EndReg = getDRegFromQReg(EndReg) + 1;
3021      // If the register is the same as the start reg, there's nothing
3022      // more to do.
3023      if (Reg == EndReg)
3024        continue;
3025      // The register must be in the same register class as the first.
3026      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3027        Error(EndLoc, "invalid register in register list");
3028        return MatchOperand_ParseFail;
3029      }
3030      // Ranges must go from low to high.
3031      if (Reg > EndReg) {
3032        Error(EndLoc, "bad range in register list");
3033        return MatchOperand_ParseFail;
3034      }
3035      // Parse the lane specifier if present.
3036      VectorLaneTy NextLaneKind;
3037      unsigned NextLaneIndex;
3038      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3039        return MatchOperand_ParseFail;
3040      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3041        Error(EndLoc, "mismatched lane index in register list");
3042        return MatchOperand_ParseFail;
3043      }
3044      EndLoc = Parser.getTok().getLoc();
3045
3046      // Add all the registers in the range to the register list.
3047      Count += EndReg - Reg;
3048      Reg = EndReg;
3049      continue;
3050    }
3051    Parser.Lex(); // Eat the comma.
3052    RegLoc = Parser.getTok().getLoc();
3053    int OldReg = Reg;
3054    Reg = tryParseRegister();
3055    if (Reg == -1) {
3056      Error(RegLoc, "register expected");
3057      return MatchOperand_ParseFail;
3058    }
3059    // Vector register lists must be contiguous.
3060    // It's OK to use the enumeration values directly here, as the
3061    // VFP register classes have the enum sorted properly.
3062    //
3063    // The list is of D registers, but we also allow Q regs and just interpret
3064    // them as the two D sub-registers.
3065    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3066      if (!Spacing)
3067        Spacing = 1; // Register range implies a single spaced list.
3068      else if (Spacing == 2) {
3069        Error(RegLoc,
3070              "invalid register in double-spaced list (must be 'D' register)");
3071        return MatchOperand_ParseFail;
3072      }
3073      Reg = getDRegFromQReg(Reg);
3074      if (Reg != OldReg + 1) {
3075        Error(RegLoc, "non-contiguous register range");
3076        return MatchOperand_ParseFail;
3077      }
3078      ++Reg;
3079      Count += 2;
3080      // Parse the lane specifier if present.
3081      VectorLaneTy NextLaneKind;
3082      unsigned NextLaneIndex;
3083      SMLoc EndLoc = Parser.getTok().getLoc();
3084      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3085        return MatchOperand_ParseFail;
3086      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3087        Error(EndLoc, "mismatched lane index in register list");
3088        return MatchOperand_ParseFail;
3089      }
3090      continue;
3091    }
3092    // Normal D register.
3093    // Figure out the register spacing (single or double) of the list if
3094    // we don't know it already.
3095    if (!Spacing)
3096      Spacing = 1 + (Reg == OldReg + 2);
3097
3098    // Just check that it's contiguous and keep going.
3099    if (Reg != OldReg + Spacing) {
3100      Error(RegLoc, "non-contiguous register range");
3101      return MatchOperand_ParseFail;
3102    }
3103    ++Count;
3104    // Parse the lane specifier if present.
3105    VectorLaneTy NextLaneKind;
3106    unsigned NextLaneIndex;
3107    SMLoc EndLoc = Parser.getTok().getLoc();
3108    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3109      return MatchOperand_ParseFail;
3110    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3111      Error(EndLoc, "mismatched lane index in register list");
3112      return MatchOperand_ParseFail;
3113    }
3114  }
3115
3116  SMLoc E = Parser.getTok().getLoc();
3117  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3118    Error(E, "'}' expected");
3119    return MatchOperand_ParseFail;
3120  }
3121  Parser.Lex(); // Eat '}' token.
3122
3123  switch (LaneKind) {
3124  case NoLanes:
3125    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3126                                                    (Spacing == 2), S, E));
3127    break;
3128  case AllLanes:
3129    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3130                                                            (Spacing == 2),
3131                                                            S, E));
3132    break;
3133  case IndexedLane:
3134    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3135                                                           LaneIndex,
3136                                                           (Spacing == 2),
3137                                                           S, E));
3138    break;
3139  }
3140  return MatchOperand_Success;
3141}
3142
3143/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
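/// For example (illustrative only), the option operand in "dmb ish" or
/// "dsb sy"; gas-style aliases such as "sh" and "un" are also accepted.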
3144ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3145parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3146  SMLoc S = Parser.getTok().getLoc();
3147  const AsmToken &Tok = Parser.getTok();
3148  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3149  StringRef OptStr = Tok.getString();
3150
3151  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3152    .Case("sy",    ARM_MB::SY)
3153    .Case("st",    ARM_MB::ST)
3154    .Case("sh",    ARM_MB::ISH)
3155    .Case("ish",   ARM_MB::ISH)
3156    .Case("shst",  ARM_MB::ISHST)
3157    .Case("ishst", ARM_MB::ISHST)
3158    .Case("nsh",   ARM_MB::NSH)
3159    .Case("un",    ARM_MB::NSH)
3160    .Case("nshst", ARM_MB::NSHST)
3161    .Case("unst",  ARM_MB::NSHST)
3162    .Case("osh",   ARM_MB::OSH)
3163    .Case("oshst", ARM_MB::OSHST)
3164    .Default(~0U);
3165
3166  if (Opt == ~0U)
3167    return MatchOperand_NoMatch;
3168
3169  Parser.Lex(); // Eat identifier token.
3170  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3171  return MatchOperand_Success;
3172}
3173
3174/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
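/// For example (illustrative only), the "if" in "cpsie if" or the "aif" in
/// "cpsid aif"; the literal string "none" selects no flags.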
3175ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3176parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3177  SMLoc S = Parser.getTok().getLoc();
3178  const AsmToken &Tok = Parser.getTok();
3179  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3180  StringRef IFlagsStr = Tok.getString();
3181
3182  // An iflags string of "none" is interpreted to mean that none of the AIF
3183  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3184  unsigned IFlags = 0;
3185  if (IFlagsStr != "none") {
3186    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3187      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3188        .Case("a", ARM_PROC::A)
3189        .Case("i", ARM_PROC::I)
3190        .Case("f", ARM_PROC::F)
3191        .Default(~0U);
3192
3193      // If some specific iflag is already set, it means that some letter is
3194      // present more than once, which is not acceptable.
3195      if (Flag == ~0U || (IFlags & Flag))
3196        return MatchOperand_NoMatch;
3197
3198      IFlags |= Flag;
3199    }
3200  }
3201
3202  Parser.Lex(); // Eat identifier token.
3203  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3204  return MatchOperand_Success;
3205}
3206
3207/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
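/// Illustrative examples (assumed, not exhaustive): "apsr_nzcvq" or "cpsr_fc"
/// for A/R-class targets, and bare names such as "primask" or "basepri" for
/// M-class targets.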
3208ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3209parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3210  SMLoc S = Parser.getTok().getLoc();
3211  const AsmToken &Tok = Parser.getTok();
3212  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3213  StringRef Mask = Tok.getString();
3214
3215  if (isMClass()) {
3216    // See ARMv6-M 10.1.1
3217    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3218      .Case("apsr", 0)
3219      .Case("iapsr", 1)
3220      .Case("eapsr", 2)
3221      .Case("xpsr", 3)
3222      .Case("ipsr", 5)
3223      .Case("epsr", 6)
3224      .Case("iepsr", 7)
3225      .Case("msp", 8)
3226      .Case("psp", 9)
3227      .Case("primask", 16)
3228      .Case("basepri", 17)
3229      .Case("basepri_max", 18)
3230      .Case("faultmask", 19)
3231      .Case("control", 20)
3232      .Default(~0U);
3233
3234    if (FlagsVal == ~0U)
3235      return MatchOperand_NoMatch;
3236
3237    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3238      // basepri, basepri_max and faultmask are only valid for V7m.
3239      return MatchOperand_NoMatch;
3240
3241    Parser.Lex(); // Eat identifier token.
3242    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3243    return MatchOperand_Success;
3244  }
3245
3246  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3247  size_t Start = 0, Next = Mask.find('_');
3248  StringRef Flags = "";
3249  std::string SpecReg = Mask.slice(Start, Next).lower();
3250  if (Next != StringRef::npos)
3251    Flags = Mask.slice(Next+1, Mask.size());
3252
3253  // FlagsVal contains the complete mask:
3254  // 3-0: Mask
3255  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3256  unsigned FlagsVal = 0;
3257
3258  if (SpecReg == "apsr") {
3259    FlagsVal = StringSwitch<unsigned>(Flags)
3260    .Case("nzcvq",  0x8) // same as CPSR_f
3261    .Case("g",      0x4) // same as CPSR_s
3262    .Case("nzcvqg", 0xc) // same as CPSR_fs
3263    .Default(~0U);
3264
3265    if (FlagsVal == ~0U) {
3266      if (!Flags.empty())
3267        return MatchOperand_NoMatch;
3268      else
3269        FlagsVal = 8; // No flag
3270    }
3271  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3272    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3273      Flags = "fc";
3274    for (int i = 0, e = Flags.size(); i != e; ++i) {
3275      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3276      .Case("c", 1)
3277      .Case("x", 2)
3278      .Case("s", 4)
3279      .Case("f", 8)
3280      .Default(~0U);
3281
3282      // If some specific flag is already set, it means that some letter is
3283      // present more than once, which is not acceptable.
3284      if (FlagsVal == ~0U || (FlagsVal & Flag))
3285        return MatchOperand_NoMatch;
3286      FlagsVal |= Flag;
3287    }
3288  } else // No match for special register.
3289    return MatchOperand_NoMatch;
3290
3291  // Special register without flags is NOT equivalent to "fc" flags.
3292  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3293  // two lines would enable gas compatibility at the expense of breaking
3294  // round-tripping.
3295  //
3296  // if (!FlagsVal)
3297  //  FlagsVal = 0x9;
3298
3299  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3300  if (SpecReg == "spsr")
3301    FlagsVal |= 16;
3302
3303  Parser.Lex(); // Eat identifier token.
3304  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3305  return MatchOperand_Success;
3306}
3307
3308ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3309parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3310            int Low, int High) {
3311  const AsmToken &Tok = Parser.getTok();
3312  if (Tok.isNot(AsmToken::Identifier)) {
3313    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3314    return MatchOperand_ParseFail;
3315  }
3316  StringRef ShiftName = Tok.getString();
3317  std::string LowerOp = Op.lower();
3318  std::string UpperOp = Op.upper();
3319  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3320    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3321    return MatchOperand_ParseFail;
3322  }
3323  Parser.Lex(); // Eat shift type token.
3324
3325  // There must be a '#' and a shift amount.
3326  if (Parser.getTok().isNot(AsmToken::Hash) &&
3327      Parser.getTok().isNot(AsmToken::Dollar)) {
3328    Error(Parser.getTok().getLoc(), "'#' expected");
3329    return MatchOperand_ParseFail;
3330  }
3331  Parser.Lex(); // Eat hash token.
3332
3333  const MCExpr *ShiftAmount;
3334  SMLoc Loc = Parser.getTok().getLoc();
3335  if (getParser().ParseExpression(ShiftAmount)) {
3336    Error(Loc, "illegal expression");
3337    return MatchOperand_ParseFail;
3338  }
3339  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3340  if (!CE) {
3341    Error(Loc, "constant expression expected");
3342    return MatchOperand_ParseFail;
3343  }
3344  int Val = CE->getValue();
3345  if (Val < Low || Val > High) {
3346    Error(Loc, "immediate value out of range");
3347    return MatchOperand_ParseFail;
3348  }
3349
3350  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3351
3352  return MatchOperand_Success;
3353}
3354
3355ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3356parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3357  const AsmToken &Tok = Parser.getTok();
3358  SMLoc S = Tok.getLoc();
3359  if (Tok.isNot(AsmToken::Identifier)) {
3360    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3361    return MatchOperand_ParseFail;
3362  }
3363  int Val = StringSwitch<int>(Tok.getString())
3364    .Case("be", 1)
3365    .Case("le", 0)
3366    .Default(-1);
3367  Parser.Lex(); // Eat the token.
3368
3369  if (Val == -1) {
3370    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3371    return MatchOperand_ParseFail;
3372  }
3373  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3374                                                                  getContext()),
3375                                           S, Parser.getTok().getLoc()));
3376  return MatchOperand_Success;
3377}
3378
3379/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3380/// instructions. Legal values are:
3381///     lsl #n  'n' in [0,31]
3382///     asr #n  'n' in [1,32]
3383///             n == 32 encoded as n == 0.
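/// For instance (illustrative only), the "lsl #4" in "ssat r0, #8, r1, lsl #4"
/// or the "asr #32" in "usat r0, #7, r1, asr #32" (ARM mode only).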
3384ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3385parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3386  const AsmToken &Tok = Parser.getTok();
3387  SMLoc S = Tok.getLoc();
3388  if (Tok.isNot(AsmToken::Identifier)) {
3389    Error(S, "shift operator 'asr' or 'lsl' expected");
3390    return MatchOperand_ParseFail;
3391  }
3392  StringRef ShiftName = Tok.getString();
3393  bool isASR;
3394  if (ShiftName == "lsl" || ShiftName == "LSL")
3395    isASR = false;
3396  else if (ShiftName == "asr" || ShiftName == "ASR")
3397    isASR = true;
3398  else {
3399    Error(S, "shift operator 'asr' or 'lsl' expected");
3400    return MatchOperand_ParseFail;
3401  }
3402  Parser.Lex(); // Eat the operator.
3403
3404  // A '#' and a shift amount.
3405  if (Parser.getTok().isNot(AsmToken::Hash) &&
3406      Parser.getTok().isNot(AsmToken::Dollar)) {
3407    Error(Parser.getTok().getLoc(), "'#' expected");
3408    return MatchOperand_ParseFail;
3409  }
3410  Parser.Lex(); // Eat hash token.
3411
3412  const MCExpr *ShiftAmount;
3413  SMLoc E = Parser.getTok().getLoc();
3414  if (getParser().ParseExpression(ShiftAmount)) {
3415    Error(E, "malformed shift expression");
3416    return MatchOperand_ParseFail;
3417  }
3418  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3419  if (!CE) {
3420    Error(E, "shift amount must be an immediate");
3421    return MatchOperand_ParseFail;
3422  }
3423
3424  int64_t Val = CE->getValue();
3425  if (isASR) {
3426    // Shift amount must be in [1,32]
3427    if (Val < 1 || Val > 32) {
3428      Error(E, "'asr' shift amount must be in range [1,32]");
3429      return MatchOperand_ParseFail;
3430    }
3431    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3432    if (isThumb() && Val == 32) {
3433      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3434      return MatchOperand_ParseFail;
3435    }
3436    if (Val == 32) Val = 0;
3437  } else {
3438    // Shift amount must be in [0,31]
3439    if (Val < 0 || Val > 31) {
3440      Error(E, "'lsl' shift amount must be in range [0,31]");
3441      return MatchOperand_ParseFail;
3442    }
3443  }
3444
3445  E = Parser.getTok().getLoc();
3446  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3447
3448  return MatchOperand_Success;
3449}
3450
3451/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3452/// of instructions. Legal values are:
3453///     ror #n  'n' in {0, 8, 16, 24}
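/// For instance (illustrative only), the "ror #8" in "sxtb r0, r1, ror #8".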
3454ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3455parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3456  const AsmToken &Tok = Parser.getTok();
3457  SMLoc S = Tok.getLoc();
3458  if (Tok.isNot(AsmToken::Identifier))
3459    return MatchOperand_NoMatch;
3460  StringRef ShiftName = Tok.getString();
3461  if (ShiftName != "ror" && ShiftName != "ROR")
3462    return MatchOperand_NoMatch;
3463  Parser.Lex(); // Eat the operator.
3464
3465  // A '#' and a rotate amount.
3466  if (Parser.getTok().isNot(AsmToken::Hash) &&
3467      Parser.getTok().isNot(AsmToken::Dollar)) {
3468    Error(Parser.getTok().getLoc(), "'#' expected");
3469    return MatchOperand_ParseFail;
3470  }
3471  Parser.Lex(); // Eat hash token.
3472
3473  const MCExpr *ShiftAmount;
3474  SMLoc E = Parser.getTok().getLoc();
3475  if (getParser().ParseExpression(ShiftAmount)) {
3476    Error(E, "malformed rotate expression");
3477    return MatchOperand_ParseFail;
3478  }
3479  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3480  if (!CE) {
3481    Error(E, "rotate amount must be an immediate");
3482    return MatchOperand_ParseFail;
3483  }
3484
3485  int64_t Val = CE->getValue();
3486  // Shift amount must be in {0, 8, 16, 24}. Zero is an undocumented
3487  // extension; normally, it is expressed in asm by omitting the rotate
3488  // operand entirely.
3489  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3490    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3491    return MatchOperand_ParseFail;
3492  }
3493
3494  E = Parser.getTok().getLoc();
3495  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3496
3497  return MatchOperand_Success;
3498}
3499
3500ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3501parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3502  SMLoc S = Parser.getTok().getLoc();
3503  // The bitfield descriptor is really two operands, the LSB and the width.
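  // For instance (illustrative only), in "bfi r0, r1, #8, #4" this parses the
  // "#8, #4" part as lsb == 8 and width == 4.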
3504  if (Parser.getTok().isNot(AsmToken::Hash) &&
3505      Parser.getTok().isNot(AsmToken::Dollar)) {
3506    Error(Parser.getTok().getLoc(), "'#' expected");
3507    return MatchOperand_ParseFail;
3508  }
3509  Parser.Lex(); // Eat hash token.
3510
3511  const MCExpr *LSBExpr;
3512  SMLoc E = Parser.getTok().getLoc();
3513  if (getParser().ParseExpression(LSBExpr)) {
3514    Error(E, "malformed immediate expression");
3515    return MatchOperand_ParseFail;
3516  }
3517  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3518  if (!CE) {
3519    Error(E, "'lsb' operand must be an immediate");
3520    return MatchOperand_ParseFail;
3521  }
3522
3523  int64_t LSB = CE->getValue();
3524  // The LSB must be in the range [0,31]
3525  if (LSB < 0 || LSB > 31) {
3526    Error(E, "'lsb' operand must be in the range [0,31]");
3527    return MatchOperand_ParseFail;
3528  }
3529  E = Parser.getTok().getLoc();
3530
3531  // Expect another immediate operand.
3532  if (Parser.getTok().isNot(AsmToken::Comma)) {
3533    Error(Parser.getTok().getLoc(), "too few operands");
3534    return MatchOperand_ParseFail;
3535  }
3536  Parser.Lex(); // Eat the comma.
3537  if (Parser.getTok().isNot(AsmToken::Hash) &&
3538      Parser.getTok().isNot(AsmToken::Dollar)) {
3539    Error(Parser.getTok().getLoc(), "'#' expected");
3540    return MatchOperand_ParseFail;
3541  }
3542  Parser.Lex(); // Eat hash token.
3543
3544  const MCExpr *WidthExpr;
3545  if (getParser().ParseExpression(WidthExpr)) {
3546    Error(E, "malformed immediate expression");
3547    return MatchOperand_ParseFail;
3548  }
3549  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3550  if (!CE) {
3551    Error(E, "'width' operand must be an immediate");
3552    return MatchOperand_ParseFail;
3553  }
3554
3555  int64_t Width = CE->getValue();
3556  // The width must be in the range [1,32-lsb]
3557  if (Width < 1 || Width > 32 - LSB) {
3558    Error(E, "'width' operand must be in the range [1,32-lsb]");
3559    return MatchOperand_ParseFail;
3560  }
3561  E = Parser.getTok().getLoc();
3562
3563  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3564
3565  return MatchOperand_Success;
3566}
3567
3568ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3569parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3570  // Check for a post-index addressing register operand. Specifically:
3571  // postidx_reg := '+' register {, shift}
3572  //              | '-' register {, shift}
3573  //              | register {, shift}
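  // For instance (illustrative only), the "r2, lsl #2" in
  // "ldr r0, [r1], r2, lsl #2", or the "-r2" in "str r0, [r1], -r2".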
3574
3575  // This method must return MatchOperand_NoMatch without consuming any tokens
3576  // in the case where there is no match, as other alternatives take other
3577  // parse methods.
3578  AsmToken Tok = Parser.getTok();
3579  SMLoc S = Tok.getLoc();
3580  bool haveEaten = false;
3581  bool isAdd = true;
3582  int Reg = -1;
3583  if (Tok.is(AsmToken::Plus)) {
3584    Parser.Lex(); // Eat the '+' token.
3585    haveEaten = true;
3586  } else if (Tok.is(AsmToken::Minus)) {
3587    Parser.Lex(); // Eat the '-' token.
3588    isAdd = false;
3589    haveEaten = true;
3590  }
3591  if (Parser.getTok().is(AsmToken::Identifier))
3592    Reg = tryParseRegister();
3593  if (Reg == -1) {
3594    if (!haveEaten)
3595      return MatchOperand_NoMatch;
3596    Error(Parser.getTok().getLoc(), "register expected");
3597    return MatchOperand_ParseFail;
3598  }
3599  SMLoc E = Parser.getTok().getLoc();
3600
3601  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3602  unsigned ShiftImm = 0;
3603  if (Parser.getTok().is(AsmToken::Comma)) {
3604    Parser.Lex(); // Eat the ','.
3605    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3606      return MatchOperand_ParseFail;
3607  }
3608
3609  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3610                                                  ShiftImm, S, E));
3611
3612  return MatchOperand_Success;
3613}
3614
3615ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3616parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3617  // Check for a post-index addressing register operand. Specifically:
3618  // am3offset := '+' register
3619  //              | '-' register
3620  //              | register
3621  //              | # imm
3622  //              | # + imm
3623  //              | # - imm
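  // For instance (illustrative only), the "#4" in "ldrh r0, [r1], #4" or the
  // "-r2" in "strh r0, [r1], -r2".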
3624
3625  // This method must return MatchOperand_NoMatch without consuming any tokens
3626  // in the case where there is no match, as other alternatives take other
3627  // parse methods.
3628  AsmToken Tok = Parser.getTok();
3629  SMLoc S = Tok.getLoc();
3630
3631  // Do immediates first, as we always parse those if we have a '#'.
3632  if (Parser.getTok().is(AsmToken::Hash) ||
3633      Parser.getTok().is(AsmToken::Dollar)) {
3634    Parser.Lex(); // Eat the '#'.
3635    // Explicitly look for a '-', as we need to encode negative zero
3636    // differently.
3637    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3638    const MCExpr *Offset;
3639    if (getParser().ParseExpression(Offset))
3640      return MatchOperand_ParseFail;
3641    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3642    if (!CE) {
3643      Error(S, "constant expression expected");
3644      return MatchOperand_ParseFail;
3645    }
3646    SMLoc E = Tok.getLoc();
3647    // Negative zero is encoded as the flag value INT32_MIN.
3648    int32_t Val = CE->getValue();
3649    if (isNegative && Val == 0)
3650      Val = INT32_MIN;
3651
3652    Operands.push_back(
3653      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3654
3655    return MatchOperand_Success;
3656  }
3657
3658
3659  bool haveEaten = false;
3660  bool isAdd = true;
3661  int Reg = -1;
3662  if (Tok.is(AsmToken::Plus)) {
3663    Parser.Lex(); // Eat the '+' token.
3664    haveEaten = true;
3665  } else if (Tok.is(AsmToken::Minus)) {
3666    Parser.Lex(); // Eat the '-' token.
3667    isAdd = false;
3668    haveEaten = true;
3669  }
3670  if (Parser.getTok().is(AsmToken::Identifier))
3671    Reg = tryParseRegister();
3672  if (Reg == -1) {
3673    if (!haveEaten)
3674      return MatchOperand_NoMatch;
3675    Error(Parser.getTok().getLoc(), "register expected");
3676    return MatchOperand_ParseFail;
3677  }
3678  SMLoc E = Parser.getTok().getLoc();
3679
3680  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3681                                                  0, S, E));
3682
3683  return MatchOperand_Success;
3684}
3685
3686/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3687/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3688/// when they refer to multiple MIOperands inside a single one.
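/// (Explanatory note: the indices below assume the operand layout these
/// instructions get from parsing -- roughly [0] mnemonic token, [1] condition
/// code, [2] Rt, [3] Rt2, [4] memory operand; the later cvt* helpers follow
/// the same pattern.)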
3689bool ARMAsmParser::
3690cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3691             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3692  // Rt, Rt2
3693  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3694  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3695  // Create a writeback register dummy placeholder.
3696  Inst.addOperand(MCOperand::CreateReg(0));
3697  // addr
3698  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3699  // pred
3700  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3701  return true;
3702}
3703
3704/// cvtT2StrdPre - Convert parsed operands to MCInst.
3705/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3706/// when they refer to multiple MIOperands inside a single one.
3707bool ARMAsmParser::
3708cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3709             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3710  // Create a writeback register dummy placeholder.
3711  Inst.addOperand(MCOperand::CreateReg(0));
3712  // Rt, Rt2
3713  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3714  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3715  // addr
3716  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3717  // pred
3718  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3719  return true;
3720}
3721
3722/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3723/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3724/// when they refer to multiple MIOperands inside a single one.
3725bool ARMAsmParser::
3726cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3727                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3728  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3729
3730  // Create a writeback register dummy placeholder.
3731  Inst.addOperand(MCOperand::CreateImm(0));
3732
3733  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3734  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3735  return true;
3736}
3737
3738/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3739/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3740/// when they refer to multiple MIOperands inside a single one.
3741bool ARMAsmParser::
3742cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3743                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3744  // Create a writeback register dummy placeholder.
3745  Inst.addOperand(MCOperand::CreateImm(0));
3746  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3747  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3748  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3749  return true;
3750}
3751
3752/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3753/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3754/// when they refer to multiple MIOperands inside a single one.
3755bool ARMAsmParser::
3756cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3757                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3758  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3759
3760  // Create a writeback register dummy placeholder.
3761  Inst.addOperand(MCOperand::CreateImm(0));
3762
3763  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3764  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3765  return true;
3766}
3767
3768/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3769/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3770/// when they refer to multiple MIOperands inside a single one.
3771bool ARMAsmParser::
3772cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3773                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3774  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3775
3776  // Create a writeback register dummy placeholder.
3777  Inst.addOperand(MCOperand::CreateImm(0));
3778
3779  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3780  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3781  return true;
3782}
3783
3784
3785/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3786/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3787/// when they refer to multiple MIOperands inside a single one.
3788bool ARMAsmParser::
3789cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3790                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3791  // Create a writeback register dummy placeholder.
3792  Inst.addOperand(MCOperand::CreateImm(0));
3793  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3794  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3795  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3796  return true;
3797}
3798
3799/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3800/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3801/// when they refer to multiple MIOperands inside a single one.
3802bool ARMAsmParser::
3803cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3804                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3805  // Create a writeback register dummy placeholder.
3806  Inst.addOperand(MCOperand::CreateImm(0));
3807  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3808  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3809  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3810  return true;
3811}
3812
3813/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3814/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3815/// when they refer to multiple MIOperands inside a single one.
3816bool ARMAsmParser::
3817cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3818                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3819  // Create a writeback register dummy placeholder.
3820  Inst.addOperand(MCOperand::CreateImm(0));
3821  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3822  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3823  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3824  return true;
3825}
3826
3827/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3828/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3829/// when they refer to multiple MIOperands inside a single one.
3830bool ARMAsmParser::
3831cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3832                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3833  // Rt
3834  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3835  // Create a writeback register dummy placeholder.
3836  Inst.addOperand(MCOperand::CreateImm(0));
3837  // addr
3838  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3839  // offset
3840  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3841  // pred
3842  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3843  return true;
3844}
3845
3846/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3847/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3848/// when they refer multiple MIOperands inside a single one.
3849bool ARMAsmParser::
3850cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3851                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3852  // Rt
3853  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3854  // Create a writeback register dummy placeholder.
3855  Inst.addOperand(MCOperand::CreateImm(0));
3856  // addr
3857  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3858  // offset
3859  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3860  // pred
3861  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3862  return true;
3863}
3864
3865/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3866/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3867/// when they refer multiple MIOperands inside a single one.
3868bool ARMAsmParser::
3869cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3870                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3871  // Create a writeback register dummy placeholder.
3872  Inst.addOperand(MCOperand::CreateImm(0));
3873  // Rt
3874  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3875  // addr
3876  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3877  // offset
3878  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3879  // pred
3880  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3881  return true;
3882}
3883
3884/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3885/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3886/// when they refer multiple MIOperands inside a single one.
3887bool ARMAsmParser::
3888cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3889                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3890  // Create a writeback register dummy placeholder.
3891  Inst.addOperand(MCOperand::CreateImm(0));
3892  // Rt
3893  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3894  // addr
3895  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3896  // offset
3897  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3898  // pred
3899  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3900  return true;
3901}
3902
3903/// cvtLdrdPre - Convert parsed operands to MCInst.
3904/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3905/// when they refer to multiple MIOperands inside a single one.
3906bool ARMAsmParser::
3907cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3908           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3909  // Rt, Rt2
3910  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3911  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3912  // Create a writeback register dummy placeholder.
3913  Inst.addOperand(MCOperand::CreateImm(0));
3914  // addr
3915  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3916  // pred
3917  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3918  return true;
3919}
3920
3921/// cvtStrdPre - Convert parsed operands to MCInst.
3922/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3923/// when they refer to multiple MIOperands inside a single one.
3924bool ARMAsmParser::
3925cvtStrdPre(MCInst &Inst, unsigned Opcode,
3926           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3927  // Create a writeback register dummy placeholder.
3928  Inst.addOperand(MCOperand::CreateImm(0));
3929  // Rt, Rt2
3930  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3931  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3932  // addr
3933  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3934  // pred
3935  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3936  return true;
3937}
3938
3939/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3940/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3941/// when they refer to multiple MIOperands inside a single one.
3942bool ARMAsmParser::
3943cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3944                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3945  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3946  // Create a writeback register dummy placeholder.
3947  Inst.addOperand(MCOperand::CreateImm(0));
3948  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3949  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3950  return true;
3951}
3952
3953/// cvtThumbMultiply - Convert parsed operands to MCInst.
3954/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3955/// when they refer to multiple MIOperands inside a single one.
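/// For instance (illustrative only), "muls r0, r1, r0" is accepted, while
/// "muls r0, r1, r2" is rejected because neither source matches r0.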
3956bool ARMAsmParser::
3957cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3958           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3959  // The second source operand must be the same register as the destination
3960  // operand.
3961  if (Operands.size() == 6 &&
3962      (((ARMOperand*)Operands[3])->getReg() !=
3963       ((ARMOperand*)Operands[5])->getReg()) &&
3964      (((ARMOperand*)Operands[3])->getReg() !=
3965       ((ARMOperand*)Operands[4])->getReg())) {
3966    Error(Operands[3]->getStartLoc(),
3967          "destination register must match source register");
3968    return false;
3969  }
3970  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3971  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3972  // If we have a three-operand form, make sure to set Rn to be the operand
3973  // that isn't the same as Rd.
3974  unsigned RegOp = 4;
3975  if (Operands.size() == 6 &&
3976      ((ARMOperand*)Operands[4])->getReg() ==
3977        ((ARMOperand*)Operands[3])->getReg())
3978    RegOp = 5;
3979  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3980  Inst.addOperand(Inst.getOperand(0));
3981  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3982
3983  return true;
3984}
3985
3986bool ARMAsmParser::
3987cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3988              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3989  // Vd
3990  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3991  // Create a writeback register dummy placeholder.
3992  Inst.addOperand(MCOperand::CreateImm(0));
3993  // Vn
3994  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3995  // pred
3996  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3997  return true;
3998}
3999
4000bool ARMAsmParser::
4001cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4002                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4003  // Vd
4004  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4005  // Create a writeback register dummy placeholder.
4006  Inst.addOperand(MCOperand::CreateImm(0));
4007  // Vn
4008  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4009  // Vm
4010  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4011  // pred
4012  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4013  return true;
4014}
4015
4016bool ARMAsmParser::
4017cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4018              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4019  // Create a writeback register dummy placeholder.
4020  Inst.addOperand(MCOperand::CreateImm(0));
4021  // Vn
4022  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4023  // Vt
4024  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4025  // pred
4026  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4027  return true;
4028}
4029
4030bool ARMAsmParser::
4031cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4032                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4033  // Create a writeback register dummy placeholder.
4034  Inst.addOperand(MCOperand::CreateImm(0));
4035  // Vn
4036  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4037  // Vm
4038  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4039  // Vt
4040  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4041  // pred
4042  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4043  return true;
4044}
4045
4046/// Parse an ARM memory expression. Return false if successful; otherwise,
4047/// report an error and return true.  The first token must be a '[' when called.
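/// Illustrative examples (assumed, not exhaustive) of the forms handled here:
///   [r0]                  plain base register
///   [r0, :128]            base with alignment specifier
///   [r0, #-4]!            immediate offset, with optional writeback '!'
///   [r0, -r1, lsl #2]     register offset with optional shift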
4048bool ARMAsmParser::
4049parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4050  SMLoc S, E;
4051  assert(Parser.getTok().is(AsmToken::LBrac) &&
4052         "Token is not a Left Bracket");
4053  S = Parser.getTok().getLoc();
4054  Parser.Lex(); // Eat left bracket token.
4055
4056  const AsmToken &BaseRegTok = Parser.getTok();
4057  int BaseRegNum = tryParseRegister();
4058  if (BaseRegNum == -1)
4059    return Error(BaseRegTok.getLoc(), "register expected");
4060
4061  // The next token must either be a comma or a closing bracket.
4062  const AsmToken &Tok = Parser.getTok();
4063  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4064    return Error(Tok.getLoc(), "malformed memory operand");
4065
4066  if (Tok.is(AsmToken::RBrac)) {
4067    E = Tok.getLoc();
4068    Parser.Lex(); // Eat right bracket token.
4069
4070    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4071                                             0, 0, false, S, E));
4072
4073    // If there's a pre-indexing writeback marker, '!', just add it as a token
4074    // operand. It's rather odd, but syntactically valid.
4075    if (Parser.getTok().is(AsmToken::Exclaim)) {
4076      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4077      Parser.Lex(); // Eat the '!'.
4078    }
4079
4080    return false;
4081  }
4082
4083  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4084  Parser.Lex(); // Eat the comma.
4085
4086  // If we have a ':', it's an alignment specifier.
4087  if (Parser.getTok().is(AsmToken::Colon)) {
4088    Parser.Lex(); // Eat the ':'.
4089    E = Parser.getTok().getLoc();
4090
4091    const MCExpr *Expr;
4092    if (getParser().ParseExpression(Expr))
4093     return true;
4094
4095    // The expression has to be a constant. Memory references with relocations
4096    // don't come through here, as they use the <label> forms of the relevant
4097    // instructions.
4098    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4099    if (!CE)
4100      return Error (E, "constant expression expected");
4101
4102    unsigned Align = 0;
4103    switch (CE->getValue()) {
4104    default:
4105      return Error(E,
4106                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4107    case 16:  Align = 2; break;
4108    case 32:  Align = 4; break;
4109    case 64:  Align = 8; break;
4110    case 128: Align = 16; break;
4111    case 256: Align = 32; break;
4112    }
4113
4114    // Now we should have the closing ']'
4115    E = Parser.getTok().getLoc();
4116    if (Parser.getTok().isNot(AsmToken::RBrac))
4117      return Error(E, "']' expected");
4118    Parser.Lex(); // Eat right bracket token.
4119
4120    // Don't worry about range checking the value here. That's handled by
4121    // the is*() predicates.
4122    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4123                                             ARM_AM::no_shift, 0, Align,
4124                                             false, S, E));
4125
4126    // If there's a pre-indexing writeback marker, '!', just add it as a token
4127    // operand.
4128    if (Parser.getTok().is(AsmToken::Exclaim)) {
4129      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4130      Parser.Lex(); // Eat the '!'.
4131    }
4132
4133    return false;
4134  }
4135
4136  // If we have a '#', it's an immediate offset, else assume it's a register
4137  // offset. Be friendly and also accept a plain integer (without a leading
4138  // hash) for gas compatibility.
4139  if (Parser.getTok().is(AsmToken::Hash) ||
4140      Parser.getTok().is(AsmToken::Dollar) ||
4141      Parser.getTok().is(AsmToken::Integer)) {
4142    if (Parser.getTok().isNot(AsmToken::Integer))
4143      Parser.Lex(); // Eat the '#'.
4144    E = Parser.getTok().getLoc();
4145
4146    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4147    const MCExpr *Offset;
4148    if (getParser().ParseExpression(Offset))
4149     return true;
4150
4151    // The expression has to be a constant. Memory references with relocations
4152    // don't come through here, as they use the <label> forms of the relevant
4153    // instructions.
4154    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4155    if (!CE)
4156      return Error (E, "constant expression expected");
4157
4158    // If the constant was #-0, represent it as INT32_MIN.
4159    int32_t Val = CE->getValue();
4160    if (isNegative && Val == 0)
4161      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4162
4163    // Now we should have the closing ']'
4164    E = Parser.getTok().getLoc();
4165    if (Parser.getTok().isNot(AsmToken::RBrac))
4166      return Error(E, "']' expected");
4167    Parser.Lex(); // Eat right bracket token.
4168
4169    // Don't worry about range checking the value here. That's handled by
4170    // the is*() predicates.
4171    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4172                                             ARM_AM::no_shift, 0, 0,
4173                                             false, S, E));
4174
4175    // If there's a pre-indexing writeback marker, '!', just add it as a token
4176    // operand.
4177    if (Parser.getTok().is(AsmToken::Exclaim)) {
4178      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4179      Parser.Lex(); // Eat the '!'.
4180    }
4181
4182    return false;
4183  }
4184
4185  // The register offset is optionally preceded by a '+' or '-'
4186  bool isNegative = false;
4187  if (Parser.getTok().is(AsmToken::Minus)) {
4188    isNegative = true;
4189    Parser.Lex(); // Eat the '-'.
4190  } else if (Parser.getTok().is(AsmToken::Plus)) {
4191    // Nothing to do.
4192    Parser.Lex(); // Eat the '+'.
4193  }
4194
4195  E = Parser.getTok().getLoc();
4196  int OffsetRegNum = tryParseRegister();
4197  if (OffsetRegNum == -1)
4198    return Error(E, "register expected");
4199
4200  // If there's a shift operator, handle it.
4201  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4202  unsigned ShiftImm = 0;
4203  if (Parser.getTok().is(AsmToken::Comma)) {
4204    Parser.Lex(); // Eat the ','.
4205    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4206      return true;
4207  }
4208
4209  // Now we should have the closing ']'
4210  E = Parser.getTok().getLoc();
4211  if (Parser.getTok().isNot(AsmToken::RBrac))
4212    return Error(E, "']' expected");
4213  Parser.Lex(); // Eat right bracket token.
4214
4215  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4216                                           ShiftType, ShiftImm, 0, isNegative,
4217                                           S, E));
4218
4219  // If there's a pre-indexing writeback marker, '!', just add it as a token
4220  // operand.
4221  if (Parser.getTok().is(AsmToken::Exclaim)) {
4222    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4223    Parser.Lex(); // Eat the '!'.
4224  }
4225
4226  return false;
4227}
4228
4229/// parseMemRegOffsetShift - one of these two:
4230///   ( lsl | lsr | asr | ror ) , # shift_amount
4231///   rrx
4232/// Return false if a shift was successfully parsed; otherwise return true.
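/// For instance (illustrative only), this parses the "lsl #2" in
/// "[r0, r1, lsl #2]" or the "rrx" in "[r0, r1, rrx]".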
4233bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4234                                          unsigned &Amount) {
4235  SMLoc Loc = Parser.getTok().getLoc();
4236  const AsmToken &Tok = Parser.getTok();
4237  if (Tok.isNot(AsmToken::Identifier))
4238    return true;
4239  StringRef ShiftName = Tok.getString();
4240  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4241      ShiftName == "asl" || ShiftName == "ASL")
4242    St = ARM_AM::lsl;
4243  else if (ShiftName == "lsr" || ShiftName == "LSR")
4244    St = ARM_AM::lsr;
4245  else if (ShiftName == "asr" || ShiftName == "ASR")
4246    St = ARM_AM::asr;
4247  else if (ShiftName == "ror" || ShiftName == "ROR")
4248    St = ARM_AM::ror;
4249  else if (ShiftName == "rrx" || ShiftName == "RRX")
4250    St = ARM_AM::rrx;
4251  else
4252    return Error(Loc, "illegal shift operator");
4253  Parser.Lex(); // Eat shift type token.
4254
4255  // rrx stands alone.
4256  Amount = 0;
4257  if (St != ARM_AM::rrx) {
4258    Loc = Parser.getTok().getLoc();
4259    // A '#' and a shift amount.
4260    const AsmToken &HashTok = Parser.getTok();
4261    if (HashTok.isNot(AsmToken::Hash) &&
4262        HashTok.isNot(AsmToken::Dollar))
4263      return Error(HashTok.getLoc(), "'#' expected");
4264    Parser.Lex(); // Eat hash token.
4265
4266    const MCExpr *Expr;
4267    if (getParser().ParseExpression(Expr))
4268      return true;
4269    // Range check the immediate.
4270    // lsl, ror: 0 <= imm <= 31
4271    // lsr, asr: 0 <= imm <= 32
4272    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4273    if (!CE)
4274      return Error(Loc, "shift amount must be an immediate");
4275    int64_t Imm = CE->getValue();
4276    if (Imm < 0 ||
4277        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4278        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4279      return Error(Loc, "immediate shift value out of range");
4280    Amount = Imm;
4281  }
4282
4283  return false;
4284}
4285
4286/// parseFPImm - A floating point immediate expression operand.
4287ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4288parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4289  // Anything that can accept a floating point constant as an operand
4290  // needs to go through here, as the regular ParseExpression is
4291  // integer only.
4292  //
4293  // This routine still creates a generic Immediate operand, containing
4294  // a bitcast of the 64-bit floating point value. The various operands
4295  // that accept floats can check whether the value is valid for them
4296  // via the standard is*() predicates.
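  // For instance (illustrative only), "vmov.f32 s0, #1.0" comes through here,
  // as does the raw-encoded integer form "vmov.f32 s0, #112" (an integer in
  // [0,255] is treated as an already-encoded 8-bit value).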
4297
4298  SMLoc S = Parser.getTok().getLoc();
4299
4300  if (Parser.getTok().isNot(AsmToken::Hash) &&
4301      Parser.getTok().isNot(AsmToken::Dollar))
4302    return MatchOperand_NoMatch;
4303
4304  // Disambiguate the VMOV forms that can accept an FP immediate.
4305  // vmov.f32 <sreg>, #imm
4306  // vmov.f64 <dreg>, #imm
4307  // vmov.f32 <dreg>, #imm  @ vector f32x2
4308  // vmov.f32 <qreg>, #imm  @ vector f32x4
4309  //
4310  // There are also the NEON VMOV instructions which expect an
4311  // integer constant. Make sure we don't try to parse an FPImm
4312  // for these:
4313  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4314  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4315  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4316                           TyOp->getToken() != ".f64"))
4317    return MatchOperand_NoMatch;
4318
4319  Parser.Lex(); // Eat the '#'.
4320
4321  // Handle negation, as that still comes through as a separate token.
4322  bool isNegative = false;
4323  if (Parser.getTok().is(AsmToken::Minus)) {
4324    isNegative = true;
4325    Parser.Lex();
4326  }
4327  const AsmToken &Tok = Parser.getTok();
4328  SMLoc Loc = Tok.getLoc();
4329  if (Tok.is(AsmToken::Real)) {
4330    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4331    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4332    // If we had a '-' in front, toggle the sign bit.
4333    IntVal ^= (uint64_t)isNegative << 31;
4334    Parser.Lex(); // Eat the token.
4335    Operands.push_back(ARMOperand::CreateImm(
4336          MCConstantExpr::Create(IntVal, getContext()),
4337          S, Parser.getTok().getLoc()));
4338    return MatchOperand_Success;
4339  }
4340  // Also handle plain integers. Instructions which allow floating point
4341  // immediates also allow a raw encoded 8-bit value.
4342  if (Tok.is(AsmToken::Integer)) {
4343    int64_t Val = Tok.getIntVal();
4344    Parser.Lex(); // Eat the token.
4345    if (Val > 255 || Val < 0) {
4346      Error(Loc, "encoded floating point value out of range");
4347      return MatchOperand_ParseFail;
4348    }
4349    double RealVal = ARM_AM::getFPImmFloat(Val);
4350    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4351    Operands.push_back(ARMOperand::CreateImm(
4352        MCConstantExpr::Create(Val, getContext()), S,
4353        Parser.getTok().getLoc()));
4354    return MatchOperand_Success;
4355  }
4356
4357  Error(Loc, "invalid floating point immediate");
4358  return MatchOperand_ParseFail;
4359}
4360
4361/// Parse an ARM instruction operand.  For now this parses the operand regardless
4362/// of the mnemonic.
4363bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4364                                StringRef Mnemonic) {
4365  SMLoc S, E;
4366
4367  // Check if the current operand has a custom associated parser. If so, try to
4368  // custom parse the operand; otherwise fall back to the general approach.
4369  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4370  if (ResTy == MatchOperand_Success)
4371    return false;
4372  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4373  // there was a match, but an error occurred, in which case, just return that
4374  // the operand parsing failed.
4375  if (ResTy == MatchOperand_ParseFail)
4376    return true;
4377
4378  switch (getLexer().getKind()) {
4379  default:
4380    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4381    return true;
4382  case AsmToken::Identifier: {
4383    if (!tryParseRegisterWithWriteBack(Operands))
4384      return false;
4385    int Res = tryParseShiftRegister(Operands);
4386    if (Res == 0) // success
4387      return false;
4388    else if (Res == -1) // irrecoverable error
4389      return true;
4390    // If this is VMRS, check for the apsr_nzcv operand.
4391    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4392      S = Parser.getTok().getLoc();
4393      Parser.Lex();
4394      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4395      return false;
4396    }
4397
4398    // Fall through for the Identifier case that is not a register or a
4399    // special name.
4400  }
4401  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4402  case AsmToken::Integer: // things like 1f and 2b as branch targets
4403  case AsmToken::String:  // quoted label names.
4404  case AsmToken::Dot: {   // . as a branch target
4405    // This was not a register so parse other operands that start with an
4406    // identifier (like labels) as expressions and create them as immediates.
4407    const MCExpr *IdVal;
4408    S = Parser.getTok().getLoc();
4409    if (getParser().ParseExpression(IdVal))
4410      return true;
4411    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4412    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4413    return false;
4414  }
4415  case AsmToken::LBrac:
4416    return parseMemory(Operands);
4417  case AsmToken::LCurly:
4418    return parseRegisterList(Operands);
4419  case AsmToken::Dollar:
4420  case AsmToken::Hash: {
4421    // #42 -> immediate.
4422    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4423    S = Parser.getTok().getLoc();
4424    Parser.Lex();
4425    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4426    const MCExpr *ImmVal;
4427    if (getParser().ParseExpression(ImmVal))
4428      return true;
4429    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4430    if (CE) {
4431      int32_t Val = CE->getValue();
4432      if (isNegative && Val == 0)
4433        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4434    }
4435    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4436    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4437    return false;
4438  }
4439  case AsmToken::Colon: {
4440    // ":lower16:" and ":upper16:" expression prefixes
4441    // FIXME: Check it's an expression prefix,
4442    // e.g. (FOO - :lower16:BAR) isn't legal.
4443    ARMMCExpr::VariantKind RefKind;
4444    if (parsePrefix(RefKind))
4445      return true;
4446
4447    const MCExpr *SubExprVal;
4448    if (getParser().ParseExpression(SubExprVal))
4449      return true;
4450
4451    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4452                                                   getContext());
4453    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4454    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4455    return false;
4456  }
4457  }
4458}
4459
4460// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4461//  :lower16: and :upper16:.
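//  For example, an operand written as ':lower16:sym' gets RefKind
//  VK_ARM_LO16 here; the caller then wraps the expression for 'sym' in an
//  ARMMCExpr with that kind.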
4462bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4463  RefKind = ARMMCExpr::VK_ARM_None;
4464
4465  // :lower16: and :upper16: modifiers
4466  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4467  Parser.Lex(); // Eat ':'
4468
4469  if (getLexer().isNot(AsmToken::Identifier)) {
4470    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4471    return true;
4472  }
4473
4474  StringRef IDVal = Parser.getTok().getIdentifier();
4475  if (IDVal == "lower16") {
4476    RefKind = ARMMCExpr::VK_ARM_LO16;
4477  } else if (IDVal == "upper16") {
4478    RefKind = ARMMCExpr::VK_ARM_HI16;
4479  } else {
4480    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4481    return true;
4482  }
4483  Parser.Lex();
4484
4485  if (getLexer().isNot(AsmToken::Colon)) {
4486    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4487    return true;
4488  }
4489  Parser.Lex(); // Eat the last ':'
4490  return false;
4491}
4492
4493/// \brief Given a mnemonic, split out possible predication code and carry
4494/// setting letters to form a canonical mnemonic and flags.
4495//
4496// FIXME: Would be nice to autogen this.
4497// FIXME: This is a bit of a maze of special cases.
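//
// For example, 'addseq' is split into the canonical mnemonic 'add' with
// the carry-setting flag set and an EQ predication code.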
4498StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4499                                      unsigned &PredicationCode,
4500                                      bool &CarrySetting,
4501                                      unsigned &ProcessorIMod,
4502                                      StringRef &ITMask) {
4503  PredicationCode = ARMCC::AL;
4504  CarrySetting = false;
4505  ProcessorIMod = 0;
4506
4507  // Ignore some mnemonics we know aren't predicated forms.
4508  //
4509  // FIXME: Would be nice to autogen this.
4510  if ((Mnemonic == "movs" && isThumb()) ||
4511      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4512      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4513      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4514      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4515      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4516      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4517      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4518      Mnemonic == "fmuls")
4519    return Mnemonic;
4520
4521  // First, split out any predication code. Ignore mnemonics we know aren't
4522  // predicated but do have a carry-set and so weren't caught above.
4523  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4524      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4525      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4526      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4527    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4528      .Case("eq", ARMCC::EQ)
4529      .Case("ne", ARMCC::NE)
4530      .Case("hs", ARMCC::HS)
4531      .Case("cs", ARMCC::HS)
4532      .Case("lo", ARMCC::LO)
4533      .Case("cc", ARMCC::LO)
4534      .Case("mi", ARMCC::MI)
4535      .Case("pl", ARMCC::PL)
4536      .Case("vs", ARMCC::VS)
4537      .Case("vc", ARMCC::VC)
4538      .Case("hi", ARMCC::HI)
4539      .Case("ls", ARMCC::LS)
4540      .Case("ge", ARMCC::GE)
4541      .Case("lt", ARMCC::LT)
4542      .Case("gt", ARMCC::GT)
4543      .Case("le", ARMCC::LE)
4544      .Case("al", ARMCC::AL)
4545      .Default(~0U);
4546    if (CC != ~0U) {
4547      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4548      PredicationCode = CC;
4549    }
4550  }
4551
4552  // Next, determine if we have a carry setting bit. We explicitly ignore all
4553  // the instructions we know end in 's'.
4554  if (Mnemonic.endswith("s") &&
4555      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4556        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4557        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4558        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4559        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4560        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4561        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4562        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
4563        (Mnemonic == "movs" && isThumb()))) {
4564    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4565    CarrySetting = true;
4566  }
4567
4568  // The "cps" instruction can have an interrupt mode operand which is glued
4569  // into the mnemonic. Check for this, split it out, and parse the imod op.
4570  if (Mnemonic.startswith("cps")) {
4571    // Split out any imod code.
4572    unsigned IMod =
4573      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4574      .Case("ie", ARM_PROC::IE)
4575      .Case("id", ARM_PROC::ID)
4576      .Default(~0U);
4577    if (IMod != ~0U) {
4578      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4579      ProcessorIMod = IMod;
4580    }
4581  }
4582
4583  // The "it" instruction has the condition mask on the end of the mnemonic.
4584  if (Mnemonic.startswith("it")) {
4585    ITMask = Mnemonic.slice(2, Mnemonic.size());
4586    Mnemonic = Mnemonic.slice(0, 2);
4587  }
4588
4589  return Mnemonic;
4590}
4591
4592/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4593/// inclusion of carry set or predication code operands.
4594//
4595// FIXME: It would be nice to autogen this.
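//
// For example, 'add' accepts both ('adds', 'addeq'), while 'cbz' accepts
// neither.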
4596void ARMAsmParser::
4597getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4598                      bool &CanAcceptPredicationCode) {
4599  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4600      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4601      Mnemonic == "add" || Mnemonic == "adc" ||
4602      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4603      Mnemonic == "orr" || Mnemonic == "mvn" ||
4604      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4605      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4606      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4607                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4608                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4609    CanAcceptCarrySet = true;
4610  } else
4611    CanAcceptCarrySet = false;
4612
4613  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4614      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4615      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4616      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4617      Mnemonic == "dsb" || Mnemonic == "isb" ||
4618      (Mnemonic == "clrex" && !isThumb()) ||
4619      (Mnemonic == "nop" && isThumbOne()) ||
4620      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4621        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4622        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4623      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4624       !isThumb()) ||
4625      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4626    CanAcceptPredicationCode = false;
4627  } else
4628    CanAcceptPredicationCode = true;
4629
4630  if (isThumb()) {
4631    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4632        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4633      CanAcceptPredicationCode = false;
4634  }
4635}
4636
4637bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4638                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4639  // FIXME: This is all horribly hacky. We really need a better way to deal
4640  // with optional operands like this in the matcher table.
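  //
  // For example, Thumb 'add r0, r1' reaches here as the operand list
  // [token 'add', cc_out, cond, r0, r1]; when this returns true, the
  // defaulted cc_out at index 1 is erased by the caller.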
4641
4642  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4643  // another does not. Specifically, the MOVW instruction does not. So we
4644  // special case it here and remove the defaulted (non-setting) cc_out
4645  // operand if that's the instruction we're trying to match.
4646  //
4647  // We do this as post-processing of the explicit operands rather than just
4648  // conditionally adding the cc_out in the first place because we need
4649  // to check the type of the parsed immediate operand.
4650  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4651      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4652      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4653      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4654    return true;
4655
4656  // Register-register 'add' for thumb does not have a cc_out operand
4657  // when there are only two register operands.
4658  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4659      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4660      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4661      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4662    return true;
4663  // Register-register 'add' for thumb does not have a cc_out operand
4664  // when it's an ADD Rdm, SP, {Rdm|#imm0_1020s4} instruction. We do
4665  // have to check the immediate range here since Thumb2 has a variant
4666  // that can handle a different range and has a cc_out operand.
4667  if (((isThumb() && Mnemonic == "add") ||
4668       (isThumbTwo() && Mnemonic == "sub")) &&
4669      Operands.size() == 6 &&
4670      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4671      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4672      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4673      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4674      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4675       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4676    return true;
4677  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4678  // imm0_4095 variant. That's the least-preferred variant when
4679  // selecting via the generic "add" mnemonic, so to know that we
4680  // should remove the cc_out operand, we have to explicitly check that
4681  // it's not one of the other variants. Ugh.
4682  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4683      Operands.size() == 6 &&
4684      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4685      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4686      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4687    // Nest conditions rather than one big 'if' statement for readability.
4688    //
4689    // If either register is a high reg, it's either one of the SP
4690    // variants (handled above) or a 32-bit encoding, so we just
4691    // check against T3. If the second register is the PC, this is an
4692    // alternate form of ADR, which uses encoding T4, so check for that too.
4693    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4694         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4695        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4696        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4697      return false;
4698    // If both registers are low, we're in an IT block, and the immediate is
4699    // in range, we should use encoding T1 instead, which has a cc_out.
4700    if (inITBlock() &&
4701        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4702        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4703        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4704      return false;
4705
4706    // Otherwise, we use encoding T4, which does not have a cc_out
4707    // operand.
4708    return true;
4709  }
4710
4711  // The thumb2 multiply instruction doesn't have a CCOut register, so
4712  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4713  // use the 16-bit encoding or not.
4714  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4715      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4716      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4717      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4718      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4719      // If the registers aren't low regs, the destination reg isn't the
4720      // same as one of the source regs, or the cc_out operand is zero
4721      // outside of an IT block, we have to use the 32-bit encoding, so
4722      // remove the cc_out operand.
4723      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4724       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4725       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4726       !inITBlock() ||
4727       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4728        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4729        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4730        static_cast<ARMOperand*>(Operands[4])->getReg())))
4731    return true;
4732
4733  // Also check the 'mul' syntax variant that doesn't specify an explicit
4734  // destination register.
4735  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4736      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4737      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4738      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4739      // If the registers aren't low regs  or the cc_out operand is zero
4740      // outside of an IT block, we have to use the 32-bit encoding, so
4741      // remove the cc_out operand.
4742      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4743       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4744       !inITBlock()))
4745    return true;
4746
4747
4748
4749  // Register-register 'add/sub' for thumb does not have a cc_out operand
4750  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4751  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4752  // right, this will result in better diagnostics (which operand is off)
4753  // anyway.
4754  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4755      (Operands.size() == 5 || Operands.size() == 6) &&
4756      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4757      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4758      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4759    return true;
4760
4761  return false;
4762}
4763
4764static bool isDataTypeToken(StringRef Tok) {
4765  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4766    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4767    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4768    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4769    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4770    Tok == ".f" || Tok == ".d";
4771}
4772
4773// FIXME: This bit should probably be handled via an explicit match class
4774// in the .td files that matches the suffix instead of having it be
4775// a literal string token the way it is now.
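//
// For example, the '.f64' in 'vldmia.f64 r0, {d0-d3}' is simply dropped
// rather than being pushed as a token operand.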
4776static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4777  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4778}
4779
4780static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4781/// Parse an ARM instruction mnemonic followed by its operands.
4782bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4783                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4784  // Apply mnemonic aliases before doing anything else, as the destination
4785  // mnemonic may include suffixes and we want to handle them normally.
4786  // The generic tblgen'erated code does this later, at the start of
4787  // MatchInstructionImpl(), but that's too late for aliases that include
4788  // any sort of suffix.
4789  unsigned AvailableFeatures = getAvailableFeatures();
4790  applyMnemonicAliases(Name, AvailableFeatures);
4791
4792  // First check for the ARM-specific .req directive.
4793  if (Parser.getTok().is(AsmToken::Identifier) &&
4794      Parser.getTok().getIdentifier() == ".req") {
4795    parseDirectiveReq(Name, NameLoc);
4796    // We always return 'error' for this, as we're done with this
4797    // statement and don't need to match the instruction.
4798    return true;
4799  }
4800
4801  // Create the leading tokens for the mnemonic, split by '.' characters.
4802  size_t Start = 0, Next = Name.find('.');
4803  StringRef Mnemonic = Name.slice(Start, Next);
4804
4805  // Split out the predication code and carry setting flag from the mnemonic.
4806  unsigned PredicationCode;
4807  unsigned ProcessorIMod;
4808  bool CarrySetting;
4809  StringRef ITMask;
4810  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4811                           ProcessorIMod, ITMask);
4812
4813  // In Thumb1, only the branch (B) instruction can be predicated.
4814  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4815    Parser.EatToEndOfStatement();
4816    return Error(NameLoc, "conditional execution not supported in Thumb1");
4817  }
4818
4819  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4820
4821  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4822  // is the mask as it will be for the IT encoding if the conditional
4823  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4824  // where the conditional bit0 is zero, the instruction post-processing
4825  // will adjust the mask accordingly.
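  // For example, 'itt' produces the mask 0b1100 and 'ite' produces 0b0100
  // here (both assuming a '1' in the condition's bit0).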
4826  if (Mnemonic == "it") {
4827    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4828    if (ITMask.size() > 3) {
4829      Parser.EatToEndOfStatement();
4830      return Error(Loc, "too many conditions on IT instruction");
4831    }
4832    unsigned Mask = 8;
4833    for (unsigned i = ITMask.size(); i != 0; --i) {
4834      char pos = ITMask[i - 1];
4835      if (pos != 't' && pos != 'e') {
4836        Parser.EatToEndOfStatement();
4837        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4838      }
4839      Mask >>= 1;
4840      if (ITMask[i - 1] == 't')
4841        Mask |= 8;
4842    }
4843    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4844  }
4845
4846  // FIXME: This is all a pretty gross hack. We should automatically handle
4847  // optional operands like this via tblgen.
4848
4849  // Next, add the CCOut and ConditionCode operands, if needed.
4850  //
4851  // For mnemonics which can ever incorporate a carry setting bit or predication
4852  // code, our matching model involves us always generating CCOut and
4853  // ConditionCode operands to match the mnemonic "as written" and then we let
4854  // the matcher deal with finding the right instruction or generating an
4855  // appropriate error.
4856  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4857  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4858
4859  // If we had a carry-set on an instruction that can't do that, issue an
4860  // error.
4861  if (!CanAcceptCarrySet && CarrySetting) {
4862    Parser.EatToEndOfStatement();
4863    return Error(NameLoc, "instruction '" + Mnemonic +
4864                 "' can not set flags, but 's' suffix specified");
4865  }
4866  // If we had a predication code on an instruction that can't do that, issue an
4867  // error.
4868  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4869    Parser.EatToEndOfStatement();
4870    return Error(NameLoc, "instruction '" + Mnemonic +
4871                 "' is not predicable, but condition code specified");
4872  }
4873
4874  // Add the carry setting operand, if necessary.
4875  if (CanAcceptCarrySet) {
4876    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4877    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4878                                               Loc));
4879  }
4880
4881  // Add the predication code operand, if necessary.
4882  if (CanAcceptPredicationCode) {
4883    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4884                                      CarrySetting);
4885    Operands.push_back(ARMOperand::CreateCondCode(
4886                         ARMCC::CondCodes(PredicationCode), Loc));
4887  }
4888
4889  // Add the processor imod operand, if necessary.
4890  if (ProcessorIMod) {
4891    Operands.push_back(ARMOperand::CreateImm(
4892          MCConstantExpr::Create(ProcessorIMod, getContext()),
4893                                 NameLoc, NameLoc));
4894  }
4895
4896  // Add the remaining tokens in the mnemonic.
4897  while (Next != StringRef::npos) {
4898    Start = Next;
4899    Next = Name.find('.', Start + 1);
4900    StringRef ExtraToken = Name.slice(Start, Next);
4901
4902    // Some NEON instructions have an optional datatype suffix that is
4903    // completely ignored. Check for that.
4904    if (isDataTypeToken(ExtraToken) &&
4905        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4906      continue;
4907
4908    if (ExtraToken != ".n") {
4909      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4910      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4911    }
4912  }
4913
4914  // Read the remaining operands.
4915  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4916    // Read the first operand.
4917    if (parseOperand(Operands, Mnemonic)) {
4918      Parser.EatToEndOfStatement();
4919      return true;
4920    }
4921
4922    while (getLexer().is(AsmToken::Comma)) {
4923      Parser.Lex();  // Eat the comma.
4924
4925      // Parse and remember the operand.
4926      if (parseOperand(Operands, Mnemonic)) {
4927        Parser.EatToEndOfStatement();
4928        return true;
4929      }
4930    }
4931  }
4932
4933  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4934    SMLoc Loc = getLexer().getLoc();
4935    Parser.EatToEndOfStatement();
4936    return Error(Loc, "unexpected token in argument list");
4937  }
4938
4939  Parser.Lex(); // Consume the EndOfStatement
4940
4941  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4942  // do and don't have a cc_out optional-def operand. With some spot-checks
4943  // of the operand list, we can figure out which variant we're trying to
4944  // parse and adjust accordingly before actually matching. We shouldn't ever
4945  // try to remove a cc_out operand that was explicitly set on the
4946  // mnemonic, of course (CarrySetting == true). Reason number #317 why the
4947  // table driven matcher doesn't fit well with the ARM instruction set.
4948  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4949    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4950    Operands.erase(Operands.begin() + 1);
4951    delete Op;
4952  }
4953
4954  // ARM mode 'blx' needs special handling, as the register operand version
4955  // is predicable, but the label operand version is not. So, we can't rely
4956  // on the Mnemonic based checking to correctly figure out when to put
4957  // a k_CondCode operand in the list. If we're trying to match the label
4958  // version, remove the k_CondCode operand here.
4959  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4960      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4961    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4962    Operands.erase(Operands.begin() + 1);
4963    delete Op;
4964  }
4965
4966  // The vector-compare-to-zero instructions have a literal token "#0" at
4967  // the end that comes to here as an immediate operand. Convert it to a
4968  // token to play nicely with the matcher.
4969  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4970      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4971      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4972    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4973    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4974    if (CE && CE->getValue() == 0) {
4975      Operands.erase(Operands.begin() + 5);
4976      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4977      delete Op;
4978    }
4979  }
4980  // VCMP{E} does the same thing, but with a different operand count.
4981  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4982      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4983    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4984    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4985    if (CE && CE->getValue() == 0) {
4986      Operands.erase(Operands.begin() + 4);
4987      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4988      delete Op;
4989    }
4990  }
4991  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4992  // end. Convert it to a token here. Take care not to convert those
4993  // that should hit the Thumb2 encoding.
4994  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4995      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4996      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4997      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4998    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4999    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5000    if (CE && CE->getValue() == 0 &&
5001        (isThumbOne() ||
5002         // The cc_out operand matches the IT block.
5003         ((inITBlock() != CarrySetting) &&
5004         // Neither register operand is a high register.
5005         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5006          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5007      Operands.erase(Operands.begin() + 5);
5008      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5009      delete Op;
5010    }
5011  }
5012
5013  return false;
5014}
5015
5016// Validate context-sensitive operand constraints.
5017
5018// Return 'true' if the register list contains non-low GPR registers (other
5019// than HiReg), 'false' otherwise. If Reg is in the register list, set
5020// 'containsReg' to true.
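//
// For tPOP, for instance, HiReg is PC, so a list like {r0-r7, pc} passes,
// while one containing r8 causes a 'true' return.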
5021static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5022                                 unsigned HiReg, bool &containsReg) {
5023  containsReg = false;
5024  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5025    unsigned OpReg = Inst.getOperand(i).getReg();
5026    if (OpReg == Reg)
5027      containsReg = true;
5028    // Anything other than a low register isn't legal here.
5029    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5030      return true;
5031  }
5032  return false;
5033}
5034
5035// Check if the specified register is in the register list of the inst,
5036// starting at the indicated operand number.
5037static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5038  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5039    unsigned OpReg = Inst.getOperand(i).getReg();
5040    if (OpReg == Reg)
5041      return true;
5042  }
5043  return false;
5044}
5045
5046// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5047// the ARMInsts array) instead. Getting that here requires awkward
5048// API changes, though. Better way?
5049namespace llvm {
5050extern const MCInstrDesc ARMInsts[];
5051}
5052static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5053  return ARMInsts[Opcode];
5054}
5055
5056// FIXME: We would really like to be able to tablegen'erate this.
5057bool ARMAsmParser::
5058validateInstruction(MCInst &Inst,
5059                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5060  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5061  SMLoc Loc = Operands[0]->getStartLoc();
5062  // Check the IT block state first.
5063  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
5064  // being allowed in IT blocks, but not being predicable.  It just always
5065  // executes.
5066  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
5067    unsigned bit = 1;
5068    if (ITState.FirstCond)
5069      ITState.FirstCond = false;
5070    else
5071      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5072    // The instruction must be predicable.
5073    if (!MCID.isPredicable())
5074      return Error(Loc, "instructions in IT block must be predicable");
5075    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5076    unsigned ITCond = bit ? ITState.Cond :
5077      ARMCC::getOppositeCondition(ITState.Cond);
5078    if (Cond != ITCond) {
5079      // Find the condition code Operand to get its SMLoc information.
5080      SMLoc CondLoc;
5081      for (unsigned i = 1; i < Operands.size(); ++i)
5082        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5083          CondLoc = Operands[i]->getStartLoc();
5084      return Error(CondLoc, "incorrect condition in IT block; got '" +
5085                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5086                   "', but expected '" +
5087                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5088    }
5089  // Check for non-'al' condition codes outside of the IT block.
5090  } else if (isThumbTwo() && MCID.isPredicable() &&
5091             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5092             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5093             Inst.getOpcode() != ARM::t2B)
5094    return Error(Loc, "predicated instructions must be in IT block");
5095
5096  switch (Inst.getOpcode()) {
5097  case ARM::LDRD:
5098  case ARM::LDRD_PRE:
5099  case ARM::LDRD_POST:
5100  case ARM::LDREXD: {
5101    // Rt2 must be Rt + 1.
5102    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5103    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5104    if (Rt2 != Rt + 1)
5105      return Error(Operands[3]->getStartLoc(),
5106                   "destination operands must be sequential");
5107    return false;
5108  }
5109  case ARM::STRD: {
5110    // Rt2 must be Rt + 1.
5111    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5112    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5113    if (Rt2 != Rt + 1)
5114      return Error(Operands[3]->getStartLoc(),
5115                   "source operands must be sequential");
5116    return false;
5117  }
5118  case ARM::STRD_PRE:
5119  case ARM::STRD_POST:
5120  case ARM::STREXD: {
5121    // Rt2 must be Rt + 1.
5122    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5123    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5124    if (Rt2 != Rt + 1)
5125      return Error(Operands[3]->getStartLoc(),
5126                   "source operands must be sequential");
5127    return false;
5128  }
5129  case ARM::SBFX:
5130  case ARM::UBFX: {
5131    // width must be in range [1, 32-lsb]
5132    unsigned lsb = Inst.getOperand(2).getImm();
5133    unsigned widthm1 = Inst.getOperand(3).getImm();
5134    if (widthm1 >= 32 - lsb)
5135      return Error(Operands[5]->getStartLoc(),
5136                   "bitfield width must be in range [1,32-lsb]");
5137    return false;
5138  }
5139  case ARM::tLDMIA: {
5140    // If we're parsing Thumb2, the .w variant is available and handles
5141    // most cases that are normally illegal for a Thumb1 LDM
5142    // instruction. We'll make the transformation in processInstruction()
5143    // if necessary.
5144    //
5145    // Thumb LDM instructions are writeback iff the base register is not
5146    // in the register list.
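    //
    // For example, in Thumb1 'ldmia r0!, {r1, r2}' requires the '!' since r0
    // is not in the list, while 'ldmia r0!, {r0, r1}' is rejected because it is.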
5147    unsigned Rn = Inst.getOperand(0).getReg();
5148    bool hasWritebackToken =
5149      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5150       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5151    bool listContainsBase;
5152    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5153      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5154                   "registers must be in range r0-r7");
5155    // If we should have writeback, then there should be a '!' token.
5156    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5157      return Error(Operands[2]->getStartLoc(),
5158                   "writeback operator '!' expected");
5159    // If we should not have writeback, there must not be a '!'. This is
5160    // true even for the 32-bit wide encodings.
5161    if (listContainsBase && hasWritebackToken)
5162      return Error(Operands[3]->getStartLoc(),
5163                   "writeback operator '!' not allowed when base register "
5164                   "in register list");
5165
5166    break;
5167  }
5168  case ARM::t2LDMIA_UPD: {
5169    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5170      return Error(Operands[4]->getStartLoc(),
5171                   "writeback operator '!' not allowed when base register "
5172                   "in register list");
5173    break;
5174  }
5175  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5176  // so only issue a diagnostic for Thumb1. The instructions will be
5177  // switched to the t2 encodings in processInstruction() if necessary.
5178  case ARM::tPOP: {
5179    bool listContainsBase;
5180    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5181        !isThumbTwo())
5182      return Error(Operands[2]->getStartLoc(),
5183                   "registers must be in range r0-r7 or pc");
5184    break;
5185  }
5186  case ARM::tPUSH: {
5187    bool listContainsBase;
5188    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5189        !isThumbTwo())
5190      return Error(Operands[2]->getStartLoc(),
5191                   "registers must be in range r0-r7 or lr");
5192    break;
5193  }
5194  case ARM::tSTMIA_UPD: {
5195    bool listContainsBase;
5196    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5197      return Error(Operands[4]->getStartLoc(),
5198                   "registers must be in range r0-r7");
5199    break;
5200  }
5201  }
5202
5203  return false;
5204}
5205
5206static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5207  switch(Opc) {
5208  default: assert(0 && "unexpected opcode!");
5209  // VST1LN
5210  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5211  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5212  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5213  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5214  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5215  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5216  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5217  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5218  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5219
5220  // VST2LN
5221  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5222  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5223  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5224  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5225  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5226
5227  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5228  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5229  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5230  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5231  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5232
5233  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5234  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5235  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5236  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5237  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5238
5239  // VST3LN
5240  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5241  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5242  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5243  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5244  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5245  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5246  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5247  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5248  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5249  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5250  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5251  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5252  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5253  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5254  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5255
5256  // VST3
5257  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5258  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5259  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5260  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5261  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5262  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5263  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5264  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5265  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5266  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5267  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5268  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5269  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5270  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5271  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5272  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5273  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5274  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5275
5276  // VST4
5277  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5278  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5279  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5280  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5281  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5282  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5283  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5284  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5285  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5286  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5287  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5288  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5289  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5290  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5291  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5292  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5293  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5294  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5295  }
5296}
5297
5298static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5299  switch(Opc) {
5300  default: assert(0 && "unexpected opcode!");
5301  // VLD1LN
5302  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5303  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5304  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5305  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5306  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5307  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5308  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5309  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5310  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5311
5312  // VLD2LN
5313  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5314  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5315  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5316  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5317  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5318  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5319  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5320  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5321  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5322  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5323  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5324  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5325  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5326  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5327  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5328
5329  // VLD3LN
5330  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5331  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5332  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5333  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5334  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5335  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5336  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5337  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5338  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5339  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5340  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5341  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5342  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5343  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5344  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5345
5346  // VLD3
5347  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5348  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5349  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5350  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5351  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5352  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5353  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5354  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5355  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5356  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5357  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5358  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5359  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5360  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5361  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5362  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5363  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5364  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5365
5366  // VLD4LN
5367  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5368  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5369  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5370  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5371  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5372  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5373  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5374  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5375  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5376  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5377  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5378  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5379  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5380  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5381  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5382
5383  // VLD4
5384  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5385  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5386  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5387  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5388  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5389  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5390  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5391  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5392  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5393  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5394  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5395  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5396  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5397  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5398  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5399  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5400  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5401  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5402  }
5403}
5404
5405bool ARMAsmParser::
5406processInstruction(MCInst &Inst,
5407                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5408  switch (Inst.getOpcode()) {
5409  // Aliases for alternate PC+imm syntax of LDR instructions.
5410  case ARM::t2LDRpcrel:
5411    Inst.setOpcode(ARM::t2LDRpci);
5412    return true;
5413  case ARM::t2LDRBpcrel:
5414    Inst.setOpcode(ARM::t2LDRBpci);
5415    return true;
5416  case ARM::t2LDRHpcrel:
5417    Inst.setOpcode(ARM::t2LDRHpci);
5418    return true;
5419  case ARM::t2LDRSBpcrel:
5420    Inst.setOpcode(ARM::t2LDRSBpci);
5421    return true;
5422  case ARM::t2LDRSHpcrel:
5423    Inst.setOpcode(ARM::t2LDRSHpci);
5424    return true;
5425  // Handle NEON VST complex aliases.
5426  case ARM::VST1LNdWB_register_Asm_8:
5427  case ARM::VST1LNdWB_register_Asm_16:
5428  case ARM::VST1LNdWB_register_Asm_32: {
5429    MCInst TmpInst;
5430    // Shuffle the operands around so the lane index operand is in the
5431    // right place.
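    // The Asm pseudo's operands arrive as (Vd, lane, Rn, alignment, Rm, pred);
    // the real VST1LN*_UPD instruction wants (Rn_wb, Rn, alignment, Rm, Vd,
    // lane, pred), which is what gets built below.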
5432    unsigned Spacing;
5433    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5434    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5435    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5436    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5437    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5438    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5439    TmpInst.addOperand(Inst.getOperand(1)); // lane
5440    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5441    TmpInst.addOperand(Inst.getOperand(6));
5442    Inst = TmpInst;
5443    return true;
5444  }
5445
5446  case ARM::VST2LNdWB_register_Asm_8:
5447  case ARM::VST2LNdWB_register_Asm_16:
5448  case ARM::VST2LNdWB_register_Asm_32:
5449  case ARM::VST2LNqWB_register_Asm_16:
5450  case ARM::VST2LNqWB_register_Asm_32: {
5451    MCInst TmpInst;
5452    // Shuffle the operands around so the lane index operand is in the
5453    // right place.
5454    unsigned Spacing;
5455    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5456    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5457    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5458    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5459    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5460    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5461    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5462                                            Spacing));
5463    TmpInst.addOperand(Inst.getOperand(1)); // lane
5464    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5465    TmpInst.addOperand(Inst.getOperand(6));
5466    Inst = TmpInst;
5467    return true;
5468  }
5469
5470  case ARM::VST3LNdWB_register_Asm_8:
5471  case ARM::VST3LNdWB_register_Asm_16:
5472  case ARM::VST3LNdWB_register_Asm_32:
5473  case ARM::VST3LNqWB_register_Asm_16:
5474  case ARM::VST3LNqWB_register_Asm_32: {
5475    MCInst TmpInst;
5476    // Shuffle the operands around so the lane index operand is in the
5477    // right place.
5478    unsigned Spacing;
5479    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5480    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5481    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5482    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5483    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5484    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5485    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5486                                            Spacing));
5487    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5488                                            Spacing * 2));
5489    TmpInst.addOperand(Inst.getOperand(1)); // lane
5490    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5491    TmpInst.addOperand(Inst.getOperand(6));
5492    Inst = TmpInst;
5493    return true;
5494  }
5495
5496  case ARM::VST1LNdWB_fixed_Asm_8:
5497  case ARM::VST1LNdWB_fixed_Asm_16:
5498  case ARM::VST1LNdWB_fixed_Asm_32: {
5499    MCInst TmpInst;
5500    // Shuffle the operands around so the lane index operand is in the
5501    // right place.
5502    unsigned Spacing;
5503    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5504    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5505    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5506    TmpInst.addOperand(Inst.getOperand(3)); // alignment
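    // A zero register for Rm denotes the fixed-writeback form: the address
    // is post-incremented by the transfer size rather than by a register.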
5507    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5508    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5509    TmpInst.addOperand(Inst.getOperand(1)); // lane
5510    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5511    TmpInst.addOperand(Inst.getOperand(5));
5512    Inst = TmpInst;
5513    return true;
5514  }
5515
5516  case ARM::VST2LNdWB_fixed_Asm_8:
5517  case ARM::VST2LNdWB_fixed_Asm_16:
5518  case ARM::VST2LNdWB_fixed_Asm_32:
5519  case ARM::VST2LNqWB_fixed_Asm_16:
5520  case ARM::VST2LNqWB_fixed_Asm_32: {
5521    MCInst TmpInst;
5522    // Shuffle the operands around so the lane index operand is in the
5523    // right place.
5524    unsigned Spacing;
5525    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5526    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5527    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5528    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5529    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5530    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5531    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5532                                            Spacing));
5533    TmpInst.addOperand(Inst.getOperand(1)); // lane
5534    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5535    TmpInst.addOperand(Inst.getOperand(5));
5536    Inst = TmpInst;
5537    return true;
5538  }
5539
5540  case ARM::VST3LNdWB_fixed_Asm_8:
5541  case ARM::VST3LNdWB_fixed_Asm_16:
5542  case ARM::VST3LNdWB_fixed_Asm_32:
5543  case ARM::VST3LNqWB_fixed_Asm_16:
5544  case ARM::VST3LNqWB_fixed_Asm_32: {
5545    MCInst TmpInst;
5546    // Shuffle the operands around so the lane index operand is in the
5547    // right place.
5548    unsigned Spacing;
5549    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5550    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5551    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5552    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5553    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5554    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5555    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5556                                            Spacing));
5557    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5558                                            Spacing * 2));
5559    TmpInst.addOperand(Inst.getOperand(1)); // lane
5560    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5561    TmpInst.addOperand(Inst.getOperand(5));
5562    Inst = TmpInst;
5563    return true;
5564  }
5565
5566  case ARM::VST1LNdAsm_8:
5567  case ARM::VST1LNdAsm_16:
5568  case ARM::VST1LNdAsm_32: {
5569    MCInst TmpInst;
5570    // Shuffle the operands around so the lane index operand is in the
5571    // right place.
5572    unsigned Spacing;
5573    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5574    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5575    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5576    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5577    TmpInst.addOperand(Inst.getOperand(1)); // lane
5578    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5579    TmpInst.addOperand(Inst.getOperand(5));
5580    Inst = TmpInst;
5581    return true;
5582  }
5583
5584  case ARM::VST2LNdAsm_8:
5585  case ARM::VST2LNdAsm_16:
5586  case ARM::VST2LNdAsm_32:
5587  case ARM::VST2LNqAsm_16:
5588  case ARM::VST2LNqAsm_32: {
5589    MCInst TmpInst;
5590    // Shuffle the operands around so the lane index operand is in the
5591    // right place.
5592    unsigned Spacing;
5593    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5594    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5595    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5596    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5597    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5598                                            Spacing));
5599    TmpInst.addOperand(Inst.getOperand(1)); // lane
5600    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5601    TmpInst.addOperand(Inst.getOperand(5));
5602    Inst = TmpInst;
5603    return true;
5604  }
5605
5606  case ARM::VST3LNdAsm_8:
5607  case ARM::VST3LNdAsm_16:
5608  case ARM::VST3LNdAsm_32:
5609  case ARM::VST3LNqAsm_16:
5610  case ARM::VST3LNqAsm_32: {
5611    MCInst TmpInst;
5612    // Shuffle the operands around so the lane index operand is in the
5613    // right place.
5614    unsigned Spacing;
5615    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5616    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5617    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5618    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5619    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5620                                            Spacing));
5621    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5622                                            Spacing * 2));
5623    TmpInst.addOperand(Inst.getOperand(1)); // lane
5624    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5625    TmpInst.addOperand(Inst.getOperand(5));
5626    Inst = TmpInst;
5627    return true;
5628  }
5629
5630  // Handle NEON VLD complex aliases.
5631  case ARM::VLD1LNdWB_register_Asm_8:
5632  case ARM::VLD1LNdWB_register_Asm_16:
5633  case ARM::VLD1LNdWB_register_Asm_32: {
5634    MCInst TmpInst;
5635    // Shuffle the operands around so the lane index operand is in the
5636    // right place.
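    // The real VLD*LN*_UPD instructions define the loaded D register(s) and
    // Rn_wb first, then take Rn, alignment, Rm, the same D registers again
    // as tied sources (lanes not loaded are preserved), the lane index, and
    // the predicate operands.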
5637    unsigned Spacing;
5638    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5639    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5640    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5641    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5642    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5643    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5644    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5645    TmpInst.addOperand(Inst.getOperand(1)); // lane
5646    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5647    TmpInst.addOperand(Inst.getOperand(6));
5648    Inst = TmpInst;
5649    return true;
5650  }
5651
5652  case ARM::VLD2LNdWB_register_Asm_8:
5653  case ARM::VLD2LNdWB_register_Asm_16:
5654  case ARM::VLD2LNdWB_register_Asm_32:
5655  case ARM::VLD2LNqWB_register_Asm_16:
5656  case ARM::VLD2LNqWB_register_Asm_32: {
5657    MCInst TmpInst;
5658    // Shuffle the operands around so the lane index operand is in the
5659    // right place.
5660    unsigned Spacing;
5661    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5662    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5663    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5664                                            Spacing));
5665    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5666    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5667    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5668    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5669    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5670    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5671                                            Spacing));
5672    TmpInst.addOperand(Inst.getOperand(1)); // lane
5673    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5674    TmpInst.addOperand(Inst.getOperand(6));
5675    Inst = TmpInst;
5676    return true;
5677  }
5678
5679  case ARM::VLD3LNdWB_register_Asm_8:
5680  case ARM::VLD3LNdWB_register_Asm_16:
5681  case ARM::VLD3LNdWB_register_Asm_32:
5682  case ARM::VLD3LNqWB_register_Asm_16:
5683  case ARM::VLD3LNqWB_register_Asm_32: {
5684    MCInst TmpInst;
5685    // Shuffle the operands around so the lane index operand is in the
5686    // right place.
5687    unsigned Spacing;
5688    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5689    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5690    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5691                                            Spacing));
5692    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5693                                            Spacing * 2));
5694    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5695    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5696    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5697    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5698    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5699    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5700                                            Spacing));
5701    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5702                                            Spacing * 2));
5703    TmpInst.addOperand(Inst.getOperand(1)); // lane
5704    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5705    TmpInst.addOperand(Inst.getOperand(6));
5706    Inst = TmpInst;
5707    return true;
5708  }
5709
5710  case ARM::VLD4LNdWB_register_Asm_8:
5711  case ARM::VLD4LNdWB_register_Asm_16:
5712  case ARM::VLD4LNdWB_register_Asm_32:
5713  case ARM::VLD4LNqWB_register_Asm_16:
5714  case ARM::VLD4LNqWB_register_Asm_32: {
5715    MCInst TmpInst;
5716    // Shuffle the operands around so the lane index operand is in the
5717    // right place.
5718    unsigned Spacing;
5719    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5720    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5721    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5722                                            Spacing));
5723    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5724                                            Spacing * 2));
5725    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5726                                            Spacing * 3));
5727    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5728    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5729    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5730    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5731    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5732    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5733                                            Spacing));
5734    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5735                                            Spacing * 2));
5736    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5737                                            Spacing * 3));
5738    TmpInst.addOperand(Inst.getOperand(1)); // lane
5739    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5740    TmpInst.addOperand(Inst.getOperand(6));
5741    Inst = TmpInst;
5742    return true;
5743  }
5744
5745  case ARM::VLD1LNdWB_fixed_Asm_8:
5746  case ARM::VLD1LNdWB_fixed_Asm_16:
5747  case ARM::VLD1LNdWB_fixed_Asm_32: {
5748    MCInst TmpInst;
5749    // Shuffle the operands around so the lane index operand is in the
5750    // right place.
5751    unsigned Spacing;
5752    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5753    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5754    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5755    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5756    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5757    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5758    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5759    TmpInst.addOperand(Inst.getOperand(1)); // lane
5760    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5761    TmpInst.addOperand(Inst.getOperand(5));
5762    Inst = TmpInst;
5763    return true;
5764  }
5765
5766  case ARM::VLD2LNdWB_fixed_Asm_8:
5767  case ARM::VLD2LNdWB_fixed_Asm_16:
5768  case ARM::VLD2LNdWB_fixed_Asm_32:
5769  case ARM::VLD2LNqWB_fixed_Asm_16:
5770  case ARM::VLD2LNqWB_fixed_Asm_32: {
5771    MCInst TmpInst;
5772    // Shuffle the operands around so the lane index operand is in the
5773    // right place.
5774    unsigned Spacing;
5775    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5776    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5777    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5778                                            Spacing));
5779    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5780    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5781    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5782    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5783    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5784    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5785                                            Spacing));
5786    TmpInst.addOperand(Inst.getOperand(1)); // lane
5787    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5788    TmpInst.addOperand(Inst.getOperand(5));
5789    Inst = TmpInst;
5790    return true;
5791  }
5792
5793  case ARM::VLD3LNdWB_fixed_Asm_8:
5794  case ARM::VLD3LNdWB_fixed_Asm_16:
5795  case ARM::VLD3LNdWB_fixed_Asm_32:
5796  case ARM::VLD3LNqWB_fixed_Asm_16:
5797  case ARM::VLD3LNqWB_fixed_Asm_32: {
5798    MCInst TmpInst;
5799    // Shuffle the operands around so the lane index operand is in the
5800    // right place.
5801    unsigned Spacing;
5802    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5803    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5804    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5805                                            Spacing));
5806    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5807                                            Spacing * 2));
5808    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5809    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5810    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5811    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5812    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5813    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5814                                            Spacing));
5815    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5816                                            Spacing * 2));
5817    TmpInst.addOperand(Inst.getOperand(1)); // lane
5818    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5819    TmpInst.addOperand(Inst.getOperand(5));
5820    Inst = TmpInst;
5821    return true;
5822  }
5823
5824  case ARM::VLD4LNdWB_fixed_Asm_8:
5825  case ARM::VLD4LNdWB_fixed_Asm_16:
5826  case ARM::VLD4LNdWB_fixed_Asm_32:
5827  case ARM::VLD4LNqWB_fixed_Asm_16:
5828  case ARM::VLD4LNqWB_fixed_Asm_32: {
5829    MCInst TmpInst;
5830    // Shuffle the operands around so the lane index operand is in the
5831    // right place.
5832    unsigned Spacing;
5833    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5834    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5835    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5836                                            Spacing));
5837    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5838                                            Spacing * 2));
5839    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5840                                            Spacing * 3));
5841    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5842    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5843    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5844    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5845    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5846    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5847                                            Spacing));
5848    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5849                                            Spacing * 2));
5850    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5851                                            Spacing * 3));
5852    TmpInst.addOperand(Inst.getOperand(1)); // lane
5853    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5854    TmpInst.addOperand(Inst.getOperand(5));
5855    Inst = TmpInst;
5856    return true;
5857  }
5858
5859  case ARM::VLD1LNdAsm_8:
5860  case ARM::VLD1LNdAsm_16:
5861  case ARM::VLD1LNdAsm_32: {
5862    MCInst TmpInst;
5863    // Shuffle the operands around so the lane index operand is in the
5864    // right place.
5865    unsigned Spacing;
5866    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5867    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5868    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5869    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5870    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5871    TmpInst.addOperand(Inst.getOperand(1)); // lane
5872    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5873    TmpInst.addOperand(Inst.getOperand(5));
5874    Inst = TmpInst;
5875    return true;
5876  }
5877
5878  case ARM::VLD2LNdAsm_8:
5879  case ARM::VLD2LNdAsm_16:
5880  case ARM::VLD2LNdAsm_32:
5881  case ARM::VLD2LNqAsm_16:
5882  case ARM::VLD2LNqAsm_32: {
5883    MCInst TmpInst;
5884    // Shuffle the operands around so the lane index operand is in the
5885    // right place.
5886    unsigned Spacing;
5887    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5888    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5889    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5890                                            Spacing));
5891    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5892    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5893    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5894    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5895                                            Spacing));
5896    TmpInst.addOperand(Inst.getOperand(1)); // lane
5897    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5898    TmpInst.addOperand(Inst.getOperand(5));
5899    Inst = TmpInst;
5900    return true;
5901  }
5902
5903  case ARM::VLD3LNdAsm_8:
5904  case ARM::VLD3LNdAsm_16:
5905  case ARM::VLD3LNdAsm_32:
5906  case ARM::VLD3LNqAsm_16:
5907  case ARM::VLD3LNqAsm_32: {
5908    MCInst TmpInst;
5909    // Shuffle the operands around so the lane index operand is in the
5910    // right place.
5911    unsigned Spacing;
5912    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5913    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5914    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5915                                            Spacing));
5916    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5917                                            Spacing * 2));
5918    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5919    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5920    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5921    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5922                                            Spacing));
5923    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5924                                            Spacing * 2));
5925    TmpInst.addOperand(Inst.getOperand(1)); // lane
5926    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5927    TmpInst.addOperand(Inst.getOperand(5));
5928    Inst = TmpInst;
5929    return true;
5930  }
5931
5932  case ARM::VLD4LNdAsm_8:
5933  case ARM::VLD4LNdAsm_16:
5934  case ARM::VLD4LNdAsm_32:
5935  case ARM::VLD4LNqAsm_16:
5936  case ARM::VLD4LNqAsm_32: {
5937    MCInst TmpInst;
5938    // Shuffle the operands around so the lane index operand is in the
5939    // right place.
5940    unsigned Spacing;
5941    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5942    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5943    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5944                                            Spacing));
5945    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5946                                            Spacing * 2));
5947    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5948                                            Spacing * 3));
5949    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5950    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5951    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5952    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5953                                            Spacing));
5954    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5955                                            Spacing * 2));
5956    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5957                                            Spacing * 3));
5958    TmpInst.addOperand(Inst.getOperand(1)); // lane
5959    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5960    TmpInst.addOperand(Inst.getOperand(5));
5961    Inst = TmpInst;
5962    return true;
5963  }
5964
5965  // VLD3 multiple 3-element structure instructions.
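  // e.g. 'vld3.8 {d0, d1, d2}, [r4]' uses the single-spaced 'd' form
  // (Spacing == 1), while 'vld3.8 {d0, d2, d4}, [r4]' uses the
  // double-spaced 'q' form (Spacing == 2).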
5966  case ARM::VLD3dAsm_8:
5967  case ARM::VLD3dAsm_16:
5968  case ARM::VLD3dAsm_32:
5969  case ARM::VLD3qAsm_8:
5970  case ARM::VLD3qAsm_16:
5971  case ARM::VLD3qAsm_32: {
5972    MCInst TmpInst;
5973    unsigned Spacing;
5974    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5975    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5976    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5977                                            Spacing));
5978    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5979                                            Spacing * 2));
5980    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5981    TmpInst.addOperand(Inst.getOperand(2)); // alignment
5982    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5983    TmpInst.addOperand(Inst.getOperand(4));
5984    Inst = TmpInst;
5985    return true;
5986  }
5987
5988  case ARM::VLD3dWB_fixed_Asm_8:
5989  case ARM::VLD3dWB_fixed_Asm_16:
5990  case ARM::VLD3dWB_fixed_Asm_32:
5991  case ARM::VLD3qWB_fixed_Asm_8:
5992  case ARM::VLD3qWB_fixed_Asm_16:
5993  case ARM::VLD3qWB_fixed_Asm_32: {
5994    MCInst TmpInst;
5995    unsigned Spacing;
5996    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5997    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5998    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5999                                            Spacing));
6000    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6001                                            Spacing * 2));
6002    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6003    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6004    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6005    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6006    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6007    TmpInst.addOperand(Inst.getOperand(4));
6008    Inst = TmpInst;
6009    return true;
6010  }
6011
6012  case ARM::VLD3dWB_register_Asm_8:
6013  case ARM::VLD3dWB_register_Asm_16:
6014  case ARM::VLD3dWB_register_Asm_32:
6015  case ARM::VLD3qWB_register_Asm_8:
6016  case ARM::VLD3qWB_register_Asm_16:
6017  case ARM::VLD3qWB_register_Asm_32: {
6018    MCInst TmpInst;
6019    unsigned Spacing;
6020    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6021    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6022    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6023                                            Spacing));
6024    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6025                                            Spacing * 2));
6026    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6027    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6028    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6029    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6030    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6031    TmpInst.addOperand(Inst.getOperand(5));
6032    Inst = TmpInst;
6033    return true;
6034  }
6035
6036  // VLD4 multiple 4-element structure instructions.
6037  case ARM::VLD4dAsm_8:
6038  case ARM::VLD4dAsm_16:
6039  case ARM::VLD4dAsm_32:
6040  case ARM::VLD4qAsm_8:
6041  case ARM::VLD4qAsm_16:
6042  case ARM::VLD4qAsm_32: {
6043    MCInst TmpInst;
6044    unsigned Spacing;
6045    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6046    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6047    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6048                                            Spacing));
6049    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6050                                            Spacing * 2));
6051    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6052                                            Spacing * 3));
6053    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6054    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6055    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6056    TmpInst.addOperand(Inst.getOperand(4));
6057    Inst = TmpInst;
6058    return true;
6059  }
6060
6061  case ARM::VLD4dWB_fixed_Asm_8:
6062  case ARM::VLD4dWB_fixed_Asm_16:
6063  case ARM::VLD4dWB_fixed_Asm_32:
6064  case ARM::VLD4qWB_fixed_Asm_8:
6065  case ARM::VLD4qWB_fixed_Asm_16:
6066  case ARM::VLD4qWB_fixed_Asm_32: {
6067    MCInst TmpInst;
6068    unsigned Spacing;
6069    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6070    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6071    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6072                                            Spacing));
6073    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6074                                            Spacing * 2));
6075    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6076                                            Spacing * 3));
6077    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6078    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6079    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6080    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6081    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6082    TmpInst.addOperand(Inst.getOperand(4));
6083    Inst = TmpInst;
6084    return true;
6085  }
6086
6087  case ARM::VLD4dWB_register_Asm_8:
6088  case ARM::VLD4dWB_register_Asm_16:
6089  case ARM::VLD4dWB_register_Asm_32:
6090  case ARM::VLD4qWB_register_Asm_8:
6091  case ARM::VLD4qWB_register_Asm_16:
6092  case ARM::VLD4qWB_register_Asm_32: {
6093    MCInst TmpInst;
6094    unsigned Spacing;
6095    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6096    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6097    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6098                                            Spacing));
6099    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6100                                            Spacing * 2));
6101    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6102                                            Spacing * 3));
6103    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6104    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6105    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6106    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6107    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6108    TmpInst.addOperand(Inst.getOperand(5));
6109    Inst = TmpInst;
6110    return true;
6111  }
6112
6113  // VST3 multiple 3-element structure instructions.
6114  case ARM::VST3dAsm_8:
6115  case ARM::VST3dAsm_16:
6116  case ARM::VST3dAsm_32:
6117  case ARM::VST3qAsm_8:
6118  case ARM::VST3qAsm_16:
6119  case ARM::VST3qAsm_32: {
6120    MCInst TmpInst;
6121    unsigned Spacing;
6122    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6123    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6124    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6125    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6126    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6127                                            Spacing));
6128    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6129                                            Spacing * 2));
6130    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6131    TmpInst.addOperand(Inst.getOperand(4));
6132    Inst = TmpInst;
6133    return true;
6134  }
6135
6136  case ARM::VST3dWB_fixed_Asm_8:
6137  case ARM::VST3dWB_fixed_Asm_16:
6138  case ARM::VST3dWB_fixed_Asm_32:
6139  case ARM::VST3qWB_fixed_Asm_8:
6140  case ARM::VST3qWB_fixed_Asm_16:
6141  case ARM::VST3qWB_fixed_Asm_32: {
6142    MCInst TmpInst;
6143    unsigned Spacing;
6144    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6145    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6146    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6147    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6148    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6149    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6150    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6151                                            Spacing));
6152    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6153                                            Spacing * 2));
6154    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6155    TmpInst.addOperand(Inst.getOperand(4));
6156    Inst = TmpInst;
6157    return true;
6158  }
6159
6160  case ARM::VST3dWB_register_Asm_8:
6161  case ARM::VST3dWB_register_Asm_16:
6162  case ARM::VST3dWB_register_Asm_32:
6163  case ARM::VST3qWB_register_Asm_8:
6164  case ARM::VST3qWB_register_Asm_16:
6165  case ARM::VST3qWB_register_Asm_32: {
6166    MCInst TmpInst;
6167    unsigned Spacing;
6168    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6169    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6170    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6171    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6172    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6173    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6174    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6175                                            Spacing));
6176    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6177                                            Spacing * 2));
6178    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6179    TmpInst.addOperand(Inst.getOperand(5));
6180    Inst = TmpInst;
6181    return true;
6182  }
6183
6184  // VST4 multiple 4-element structure instructions.
6185  case ARM::VST4dAsm_8:
6186  case ARM::VST4dAsm_16:
6187  case ARM::VST4dAsm_32:
6188  case ARM::VST4qAsm_8:
6189  case ARM::VST4qAsm_16:
6190  case ARM::VST4qAsm_32: {
6191    MCInst TmpInst;
6192    unsigned Spacing;
6193    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6194    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6195    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6196    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6197    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6198                                            Spacing));
6199    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6200                                            Spacing * 2));
6201    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6202                                            Spacing * 3));
6203    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6204    TmpInst.addOperand(Inst.getOperand(4));
6205    Inst = TmpInst;
6206    return true;
6207  }
6208
6209  case ARM::VST4dWB_fixed_Asm_8:
6210  case ARM::VST4dWB_fixed_Asm_16:
6211  case ARM::VST4dWB_fixed_Asm_32:
6212  case ARM::VST4qWB_fixed_Asm_8:
6213  case ARM::VST4qWB_fixed_Asm_16:
6214  case ARM::VST4qWB_fixed_Asm_32: {
6215    MCInst TmpInst;
6216    unsigned Spacing;
6217    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6218    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6219    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6220    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6221    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6222    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6223    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6224                                            Spacing));
6225    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6226                                            Spacing * 2));
6227    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6228                                            Spacing * 3));
6229    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6230    TmpInst.addOperand(Inst.getOperand(4));
6231    Inst = TmpInst;
6232    return true;
6233  }
6234
6235  case ARM::VST4dWB_register_Asm_8:
6236  case ARM::VST4dWB_register_Asm_16:
6237  case ARM::VST4dWB_register_Asm_32:
6238  case ARM::VST4qWB_register_Asm_8:
6239  case ARM::VST4qWB_register_Asm_16:
6240  case ARM::VST4qWB_register_Asm_32: {
6241    MCInst TmpInst;
6242    unsigned Spacing;
6243    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6244    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6245    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6246    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6247    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6248    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6249    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6250                                            Spacing));
6251    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6252                                            Spacing * 2));
6253    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6254                                            Spacing * 3));
6255    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6256    TmpInst.addOperand(Inst.getOperand(5));
6257    Inst = TmpInst;
6258    return true;
6259  }
6260
6261  // Handle the Thumb2 mode MOV complex aliases.
6262  case ARM::t2MOVsr:
6263  case ARM::t2MOVSsr: {
6264    // Which instruction to expand to depends on the CCOut operand and,
6265    // when the register operands are all low registers, on whether we're
6266    // inside an IT block.
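    // e.g. 'movs r0, r0, lsl r1' outside an IT block and a predicated
    // 'mov r0, r0, lsl r1' inside one can both use the 16-bit tLSLrr;
    // otherwise the wide t2LSLrr is used.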
6267    bool isNarrow = false;
6268    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6269        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6270        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6271        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6272        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6273      isNarrow = true;
6274    MCInst TmpInst;
6275    unsigned newOpc;
6276    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6277    default: llvm_unreachable("unexpected opcode!");
6278    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6279    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6280    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6281    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6282    }
6283    TmpInst.setOpcode(newOpc);
6284    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6285    if (isNarrow)
6286      TmpInst.addOperand(MCOperand::CreateReg(
6287          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6288    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6289    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6290    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6291    TmpInst.addOperand(Inst.getOperand(5));
6292    if (!isNarrow)
6293      TmpInst.addOperand(MCOperand::CreateReg(
6294          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6295    Inst = TmpInst;
6296    return true;
6297  }
6298  case ARM::t2MOVsi:
6299  case ARM::t2MOVSsi: {
6300    // Which instruction to expand to depends on the CCOut operand and,
6301    // when the register operands are all low registers, on whether we're
6302    // inside an IT block.
6303    bool isNarrow = false;
6304    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6305        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6306        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6307      isNarrow = true;
6308    MCInst TmpInst;
6309    unsigned newOpc;
6310    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6311    default: llvm_unreachable("unexpected opcode!");
6312    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6313    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6314    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6315    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6316    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6317    }
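    // An ASR/LSR shift amount of 32 is encoded as 0.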
6318    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6319    if (Amount == 32) Amount = 0;
6320    TmpInst.setOpcode(newOpc);
6321    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6322    if (isNarrow)
6323      TmpInst.addOperand(MCOperand::CreateReg(
6324          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6325    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6326    if (newOpc != ARM::t2RRX)
6327      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6328    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6329    TmpInst.addOperand(Inst.getOperand(4));
6330    if (!isNarrow)
6331      TmpInst.addOperand(MCOperand::CreateReg(
6332          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6333    Inst = TmpInst;
6334    return true;
6335  }
6336  // Handle the ARM mode MOV complex aliases.
6337  case ARM::ASRr:
6338  case ARM::LSRr:
6339  case ARM::LSLr:
6340  case ARM::RORr: {
6341    ARM_AM::ShiftOpc ShiftTy;
6342    switch(Inst.getOpcode()) {
6343    default: llvm_unreachable("unexpected opcode!");
6344    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6345    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6346    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6347    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6348    }
6349    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6350    MCInst TmpInst;
6351    TmpInst.setOpcode(ARM::MOVsr);
6352    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6353    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6354    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6355    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6356    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6357    TmpInst.addOperand(Inst.getOperand(4));
6358    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6359    Inst = TmpInst;
6360    return true;
6361  }
6362  case ARM::ASRi:
6363  case ARM::LSRi:
6364  case ARM::LSLi:
6365  case ARM::RORi: {
6366    ARM_AM::ShiftOpc ShiftTy;
6367    switch(Inst.getOpcode()) {
6368    default: llvm_unreachable("unexpected opcode!");
6369    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6370    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6371    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6372    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6373    }
6374    // A shift by zero is a plain MOVr, not a MOVsi.
6375    unsigned Amt = Inst.getOperand(2).getImm();
6376    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6377    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6378    MCInst TmpInst;
6379    TmpInst.setOpcode(Opc);
6380    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6381    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6382    if (Opc == ARM::MOVsi)
6383      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6384    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6385    TmpInst.addOperand(Inst.getOperand(4));
6386    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6387    Inst = TmpInst;
6388    return true;
6389  }
6390  case ARM::RRXi: {
6391    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6392    MCInst TmpInst;
6393    TmpInst.setOpcode(ARM::MOVsi);
6394    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6395    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6396    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6397    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6398    TmpInst.addOperand(Inst.getOperand(3));
6399    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6400    Inst = TmpInst;
6401    return true;
6402  }
6403  case ARM::t2LDMIA_UPD: {
6404    // If this is a load of a single register, then we should use
6405    // a post-indexed LDR instruction instead, per the ARM ARM.
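    // e.g. 'ldmia r1!, {r2}' in Thumb2 becomes a post-indexed
    // 'ldr r2, [r1], #4'.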
6406    if (Inst.getNumOperands() != 5)
6407      return false;
6408    MCInst TmpInst;
6409    TmpInst.setOpcode(ARM::t2LDR_POST);
6410    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6411    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6412    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6413    TmpInst.addOperand(MCOperand::CreateImm(4));
6414    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6415    TmpInst.addOperand(Inst.getOperand(3));
6416    Inst = TmpInst;
6417    return true;
6418  }
6419  case ARM::t2STMDB_UPD: {
6420    // If this is a store of a single register, then we should use
6421    // a pre-indexed STR instruction instead, per the ARM ARM.
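    // e.g. 'stmdb r1!, {r2}' in Thumb2 becomes a pre-indexed
    // 'str r2, [r1, #-4]!'.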
6422    if (Inst.getNumOperands() != 5)
6423      return false;
6424    MCInst TmpInst;
6425    TmpInst.setOpcode(ARM::t2STR_PRE);
6426    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6427    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6428    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6429    TmpInst.addOperand(MCOperand::CreateImm(-4));
6430    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6431    TmpInst.addOperand(Inst.getOperand(3));
6432    Inst = TmpInst;
6433    return true;
6434  }
6435  case ARM::LDMIA_UPD:
6436    // If this is a load of a single register via a 'pop', then we should use
6437    // a post-indexed LDR instruction instead, per the ARM ARM.
6438    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6439        Inst.getNumOperands() == 5) {
6440      MCInst TmpInst;
6441      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6442      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6443      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6444      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6445      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6446      TmpInst.addOperand(MCOperand::CreateImm(4));
6447      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6448      TmpInst.addOperand(Inst.getOperand(3));
6449      Inst = TmpInst;
6450      return true;
6451    }
6452    break;
6453  case ARM::STMDB_UPD:
6454    // If this is a store of a single register via a 'push', then we should use
6455    // a pre-indexed STR instruction instead, per the ARM ARM.
6456    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6457        Inst.getNumOperands() == 5) {
6458      MCInst TmpInst;
6459      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6460      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6461      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6462      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6463      TmpInst.addOperand(MCOperand::CreateImm(-4));
6464      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6465      TmpInst.addOperand(Inst.getOperand(3));
6466      Inst = TmpInst;
6467    }
6468    break;
6469  case ARM::t2ADDri12:
6470    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6471    // mnemonic was used (not "addw"), encoding T3 is preferred.
6472    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6473        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6474      break;
6475    Inst.setOpcode(ARM::t2ADDri);
6476    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6477    break;
6478  case ARM::t2SUBri12:
6479    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6480    // mnemonic was used (not "subw"), encoding T3 is preferred.
6481    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6482        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6483      break;
6484    Inst.setOpcode(ARM::t2SUBri);
6485    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6486    break;
6487  case ARM::tADDi8:
6488    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6489    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6490    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6491    // to encoding T1 if <Rd> is omitted."
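    // e.g. 'adds r0, r0, #5' (Rd explicit, immediate < 8) selects tADDi3,
    // while the two-operand 'adds r0, #5' keeps encoding T2.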
6492    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6493      Inst.setOpcode(ARM::tADDi3);
6494      return true;
6495    }
6496    break;
6497  case ARM::tSUBi8:
6498    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6499    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6500    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6501    // to encoding T1 if <Rd> is omitted."
6502    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6503      Inst.setOpcode(ARM::tSUBi3);
6504      return true;
6505    }
6506    break;
6507  case ARM::t2ADDrr: {
6508    // If the destination and first source operand are the same, and
6509    // there's no setting of the flags, use encoding T2 instead of T3.
6510    // Note that this is only for ADD, not SUB. This mirrors the system
6511    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
6512    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6513        Inst.getOperand(5).getReg() != 0 ||
6514        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6515         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6516      break;
6517    MCInst TmpInst;
6518    TmpInst.setOpcode(ARM::tADDhirr);
6519    TmpInst.addOperand(Inst.getOperand(0));
6520    TmpInst.addOperand(Inst.getOperand(0));
6521    TmpInst.addOperand(Inst.getOperand(2));
6522    TmpInst.addOperand(Inst.getOperand(3));
6523    TmpInst.addOperand(Inst.getOperand(4));
6524    Inst = TmpInst;
6525    return true;
6526  }
6527  case ARM::tB:
6528    // A Thumb conditional branch outside of an IT block is a tBcc.
6529    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6530      Inst.setOpcode(ARM::tBcc);
6531      return true;
6532    }
6533    break;
6534  case ARM::t2B:
6535    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6536    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6537      Inst.setOpcode(ARM::t2Bcc);
6538      return true;
6539    }
6540    break;
6541  case ARM::t2Bcc:
6542    // If the conditional is AL or we're in an IT block, we really want t2B.
6543    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6544      Inst.setOpcode(ARM::t2B);
6545      return true;
6546    }
6547    break;
6548  case ARM::tBcc:
6549    // If the conditional is AL, we really want tB.
6550    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6551      Inst.setOpcode(ARM::tB);
6552      return true;
6553    }
6554    break;
6555  case ARM::tLDMIA: {
6556    // If the register list contains any high registers, or if the writeback
6557    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6558    // instead if we're in Thumb2. Otherwise, this should have generated
6559    // an error in validateInstruction().
6560    unsigned Rn = Inst.getOperand(0).getReg();
6561    bool hasWritebackToken =
6562      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6563       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6564    bool listContainsBase;
6565    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6566        (!listContainsBase && !hasWritebackToken) ||
6567        (listContainsBase && hasWritebackToken)) {
6568      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6569      assert (isThumbTwo());
6570      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6571      // If we're switching to the updating version, we need to insert
6572      // the writeback tied operand.
6573      if (hasWritebackToken)
6574        Inst.insert(Inst.begin(),
6575                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6576      return true;
6577    }
6578    break;
6579  }
6580  case ARM::tSTMIA_UPD: {
6581    // If the register list contains any high registers, we need to use
6582    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6583    // should have generated an error in validateInstruction().
6584    unsigned Rn = Inst.getOperand(0).getReg();
6585    bool listContainsBase;
6586    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6587      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6588      assert (isThumbTwo());
6589      Inst.setOpcode(ARM::t2STMIA_UPD);
6590      return true;
6591    }
6592    break;
6593  }
6594  case ARM::tPOP: {
6595    bool listContainsBase;
6596    // If the register list contains any high registers, we need to use
6597    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6598    // should have generated an error in validateInstruction().
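    // e.g. 'pop {r0, r8}' must widen to the 32-bit 'ldmia sp!, {r0, r8}'
    // (t2LDMIA_UPD), since r8 is not encodable in the 16-bit tPOP list.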
6599    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6600      return false;
6601    assert (isThumbTwo());
6602    Inst.setOpcode(ARM::t2LDMIA_UPD);
6603    // Add the base register and writeback operands.
6604    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6605    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6606    return true;
6607  }
6608  case ARM::tPUSH: {
6609    bool listContainsBase;
6610    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6611      return false;
6612    assert (isThumbTwo());
6613    Inst.setOpcode(ARM::t2STMDB_UPD);
6614    // Add the base register and writeback operands.
6615    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6616    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6617    return true;
6618  }
6619  case ARM::t2MOVi: {
6620    // If we can use the 16-bit encoding and the user didn't explicitly
6621    // request the 32-bit variant, transform it here.
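    // e.g. 'movs r0, #42' outside an IT block becomes the 16-bit tMOVi8,
    // while 'mov.w r0, #42' stays t2MOVi.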
6622    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6623        Inst.getOperand(1).getImm() <= 255 &&
6624        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6625         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6626        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6627        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6628         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6629      // The operands aren't in the same order for tMOVi8...
6630      MCInst TmpInst;
6631      TmpInst.setOpcode(ARM::tMOVi8);
6632      TmpInst.addOperand(Inst.getOperand(0));
6633      TmpInst.addOperand(Inst.getOperand(4));
6634      TmpInst.addOperand(Inst.getOperand(1));
6635      TmpInst.addOperand(Inst.getOperand(2));
6636      TmpInst.addOperand(Inst.getOperand(3));
6637      Inst = TmpInst;
6638      return true;
6639    }
6640    break;
6641  }
6642  case ARM::t2MOVr: {
6643    // If we can use the 16-bit encoding and the user didn't explicitly
6644    // request the 32-bit variant, transform it here.
6645    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6646        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6647        Inst.getOperand(2).getImm() == ARMCC::AL &&
6648        Inst.getOperand(4).getReg() == ARM::CPSR &&
6649        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6650         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6651      // The operands aren't the same for tMOV[S]r... (no cc_out)
6652      MCInst TmpInst;
6653      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6654      TmpInst.addOperand(Inst.getOperand(0));
6655      TmpInst.addOperand(Inst.getOperand(1));
6656      TmpInst.addOperand(Inst.getOperand(2));
6657      TmpInst.addOperand(Inst.getOperand(3));
6658      Inst = TmpInst;
6659      return true;
6660    }
6661    break;
6662  }
6663  case ARM::t2SXTH:
6664  case ARM::t2SXTB:
6665  case ARM::t2UXTH:
6666  case ARM::t2UXTB: {
6667    // If we can use the 16-bit encoding and the user didn't explicitly
6668    // request the 32-bit variant, transform it here.
6669    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6670        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6671        Inst.getOperand(2).getImm() == 0 &&
6672        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6673         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6674      unsigned NewOpc;
6675      switch (Inst.getOpcode()) {
6676      default: llvm_unreachable("Illegal opcode!");
6677      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
6678      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
6679      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
6680      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
6681      }
6682      // The operands aren't the same for thumb1 (no rotate operand).
6683      MCInst TmpInst;
6684      TmpInst.setOpcode(NewOpc);
6685      TmpInst.addOperand(Inst.getOperand(0));
6686      TmpInst.addOperand(Inst.getOperand(1));
6687      TmpInst.addOperand(Inst.getOperand(3));
6688      TmpInst.addOperand(Inst.getOperand(4));
6689      Inst = TmpInst;
6690      return true;
6691    }
6692    break;
6693  }
6694  case ARM::MOVsi: {
6695    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
6696    if (SOpc == ARM_AM::rrx) return false;
6697    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
6698      // Shifting by zero is accepted as a vanilla 'MOVr'
6699      MCInst TmpInst;
6700      TmpInst.setOpcode(ARM::MOVr);
6701      TmpInst.addOperand(Inst.getOperand(0));
6702      TmpInst.addOperand(Inst.getOperand(1));
6703      TmpInst.addOperand(Inst.getOperand(3));
6704      TmpInst.addOperand(Inst.getOperand(4));
6705      TmpInst.addOperand(Inst.getOperand(5));
6706      Inst = TmpInst;
6707      return true;
6708    }
6709    return false;
6710  }
6711  case ARM::ANDrsi:
6712  case ARM::ORRrsi:
6713  case ARM::EORrsi:
6714  case ARM::BICrsi:
6715  case ARM::SUBrsi:
6716  case ARM::ADDrsi: {
6717    unsigned newOpc;
6718    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
6719    if (SOpc == ARM_AM::rrx) return false;
6720    switch (Inst.getOpcode()) {
6721    default: llvm_unreachable("unexpected opcode!");
6722    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
6723    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
6724    case ARM::EORrsi: newOpc = ARM::EORrr; break;
6725    case ARM::BICrsi: newOpc = ARM::BICrr; break;
6726    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
6727    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
6728    }
6729    // If the shift is by zero, use the non-shifted instruction definition.
6730    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
6731      MCInst TmpInst;
6732      TmpInst.setOpcode(newOpc);
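      // Copy Rd, Rn, Rm, the predicate and cc_out; the (zero) shift operand
      // is dropped.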
6733      TmpInst.addOperand(Inst.getOperand(0));
6734      TmpInst.addOperand(Inst.getOperand(1));
6735      TmpInst.addOperand(Inst.getOperand(2));
6736      TmpInst.addOperand(Inst.getOperand(4));
6737      TmpInst.addOperand(Inst.getOperand(5));
6738      TmpInst.addOperand(Inst.getOperand(6));
6739      Inst = TmpInst;
6740      return true;
6741    }
6742    return false;
6743  }
6744  case ARM::t2IT: {
6745    // In the encoding, the mask bits for all but the first condition are
6746    // relative to the low bit of the condition code: a mask bit equal to
6747    // that low bit means 't'. Our internal representation always uses '1'
6748    // for 't', so XOR-toggle the bits when the low bit of the condition
6749    // code is zero. The encoding also expects the low bit of the condition
6750    // to be encoded as bit 4 of the mask operand, so OR that in if needed.
6751    MCOperand &MO = Inst.getOperand(1);
6752    unsigned Mask = MO.getImm();
6753    unsigned OrigMask = Mask;
6754    unsigned TZ = CountTrailingZeros_32(Mask);
6755    if ((Inst.getOperand(0).getImm() & 1) == 0) {
6756      assert(Mask && TZ <= 3 && "illegal IT mask value!");
6757      for (unsigned i = 3; i != TZ; --i)
6758        Mask ^= 1 << i;
6759    } else
6760      Mask |= 0x10;
6761    MO.setImm(Mask);
6762
6763    // Set up the IT block state according to the IT instruction we just
6764    // matched.
6765    assert(!inITBlock() && "nested IT blocks?!");
6766    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
6767    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
6768    ITState.CurPosition = 0;
6769    ITState.FirstCond = true;
6770    break;
6771  }
6772  }
6773  return false;
6774}
6775
6776unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
6777  // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
6778  // suffix depending on whether they're in an IT block or not.
6779  unsigned Opc = Inst.getOpcode();
6780  const MCInstrDesc &MCID = getInstDesc(Opc);
6781  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6782    assert(MCID.hasOptionalDef() &&
6783           "optionally flag setting instruction missing optional def operand");
6784    assert(MCID.NumOperands == Inst.getNumOperands() &&
6785           "operand count mismatch!");
6786    // Find the optional-def operand (cc_out).
6787    unsigned OpNo;
6788    for (OpNo = 0;
6789         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
6790         ++OpNo)
6791      ;
6792    // In Thumb1 the non-flag-setting form doesn't exist at all; reject it.
6793    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6794      return Match_MnemonicFail;
6795    // If we're parsing Thumb2, which form is legal depends on whether we're
6796    // in an IT block.
6797    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6798        !inITBlock())
6799      return Match_RequiresITBlock;
6800    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6801        inITBlock())
6802      return Match_RequiresNotITBlock;
6803  }
6804  // Some Thumb1 encodings that support high registers (e.g. tADDhirr) only
6805  // allow both registers to be from r0-r7 when in Thumb2.
6806  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6807           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6808           isARMLowRegister(Inst.getOperand(2).getReg()))
6809    return Match_RequiresThumb2;
6810  // Others only require ARMv6 or later.
6811  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6812           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6813           isARMLowRegister(Inst.getOperand(1).getReg()))
6814    return Match_RequiresV6;
6815  return Match_Success;
6816}
6817
6818bool ARMAsmParser::
6819MatchAndEmitInstruction(SMLoc IDLoc,
6820                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
6821                        MCStreamer &Out) {
6822  MCInst Inst;
6823  unsigned ErrorInfo;
6824  unsigned MatchResult;
6825  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
6826  switch (MatchResult) {
6827  default: break;
6828  case Match_Success:
6829    // Context-sensitive operand constraints aren't handled by the matcher,
6830    // so check them here.
6831    if (validateInstruction(Inst, Operands)) {
6832      // Still progress the IT block, otherwise one wrong condition causes
6833      // nasty cascading errors.
6834      forwardITPosition();
6835      return true;
6836    }
6837
6838    // Some instructions need post-processing to, for example, tweak which
6839    // encoding is selected. Loop on it while changes happen so the
6840    // individual transformations can chain off each other. E.g.,
6841    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
6842    while (processInstruction(Inst, Operands))
6843      ;
6844
6845    // Only move forward at the very end so that everything in validate
6846    // and process gets a consistent answer about whether we're in an IT
6847    // block.
6848    forwardITPosition();
6849
6850    Out.EmitInstruction(Inst);
6851    return false;
6852  case Match_MissingFeature:
6853    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
6854    return true;
6855  case Match_InvalidOperand: {
6856    SMLoc ErrorLoc = IDLoc;
6857    if (ErrorInfo != ~0U) {
6858      if (ErrorInfo >= Operands.size())
6859        return Error(IDLoc, "too few operands for instruction");
6860
6861      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
6862      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6863    }
6864
6865    return Error(ErrorLoc, "invalid operand for instruction");
6866  }
6867  case Match_MnemonicFail:
6868    return Error(IDLoc, "invalid instruction");
6869  case Match_ConversionFail:
6870    // The converter function will have already emitted a diagnostic.
6871    return true;
6872  case Match_RequiresNotITBlock:
6873    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6874  case Match_RequiresITBlock:
6875    return Error(IDLoc, "instruction only valid inside IT block");
6876  case Match_RequiresV6:
6877    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6878  case Match_RequiresThumb2:
6879    return Error(IDLoc, "instruction variant requires Thumb2");
6880  }
6881
6882  llvm_unreachable("Implement any new match types added!");
6883}
6884
6885/// ParseDirective parses the ARM-specific directives.
6886bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6887  StringRef IDVal = DirectiveID.getIdentifier();
6888  if (IDVal == ".word")
6889    return parseDirectiveWord(4, DirectiveID.getLoc());
6890  else if (IDVal == ".thumb")
6891    return parseDirectiveThumb(DirectiveID.getLoc());
6892  else if (IDVal == ".arm")
6893    return parseDirectiveARM(DirectiveID.getLoc());
6894  else if (IDVal == ".thumb_func")
6895    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6896  else if (IDVal == ".code")
6897    return parseDirectiveCode(DirectiveID.getLoc());
6898  else if (IDVal == ".syntax")
6899    return parseDirectiveSyntax(DirectiveID.getLoc());
6900  else if (IDVal == ".unreq")
6901    return parseDirectiveUnreq(DirectiveID.getLoc());
6902  else if (IDVal == ".arch")
6903    return parseDirectiveArch(DirectiveID.getLoc());
6904  else if (IDVal == ".eabi_attribute")
6905    return parseDirectiveEabiAttr(DirectiveID.getLoc());
6906  return true;
6907}
6908
6909/// parseDirectiveWord
6910///  ::= .word [ expression (, expression)* ]
6911bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6912  if (getLexer().isNot(AsmToken::EndOfStatement)) {
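    // Emit each comma-separated expression as a Size-byte value.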
6913    for (;;) {
6914      const MCExpr *Value;
6915      if (getParser().ParseExpression(Value))
6916        return true;
6917
6918      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6919
6920      if (getLexer().is(AsmToken::EndOfStatement))
6921        break;
6922
6923      // FIXME: Improve diagnostic.
6924      if (getLexer().isNot(AsmToken::Comma))
6925        return Error(L, "unexpected token in directive");
6926      Parser.Lex();
6927    }
6928  }
6929
6930  Parser.Lex();
6931  return false;
6932}
6933
6934/// parseDirectiveThumb
6935///  ::= .thumb
6936bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6937  if (getLexer().isNot(AsmToken::EndOfStatement))
6938    return Error(L, "unexpected token in directive");
6939  Parser.Lex();
6940
6941  if (!isThumb())
6942    SwitchMode();
6943  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6944  return false;
6945}
6946
6947/// parseDirectiveARM
6948///  ::= .arm
6949bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6950  if (getLexer().isNot(AsmToken::EndOfStatement))
6951    return Error(L, "unexpected token in directive");
6952  Parser.Lex();
6953
6954  if (isThumb())
6955    SwitchMode();
6956  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6957  return false;
6958}
6959
6960/// parseDirectiveThumbFunc
6961///  ::= .thumb_func symbol_name
6962bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6963  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6964  bool isMachO = MAI.hasSubsectionsViaSymbols();
6965  StringRef Name;
6966  bool needFuncName = true;
6967
6968  // Darwin asm optionally has the function name after the .thumb_func
6969  // directive; ELF doesn't.
6970  if (isMachO) {
6971    const AsmToken &Tok = Parser.getTok();
6972    if (Tok.isNot(AsmToken::EndOfStatement)) {
6973      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6974        return Error(L, "unexpected token in .thumb_func directive");
6975      Name = Tok.getIdentifier();
6976      Parser.Lex(); // Consume the identifier token.
6977      needFuncName = false;
6978    }
6979  }
6980
6981  if (getLexer().isNot(AsmToken::EndOfStatement))
6982    return Error(L, "unexpected token in directive");
6983
6984  // Eat the end of statement and any blank lines that follow.
6985  while (getLexer().is(AsmToken::EndOfStatement))
6986    Parser.Lex();
6987
6988  // FIXME: assuming the function name is on the line following .thumb_func.
6989  // We really should be checking the next symbol definition even if there's
6990  // stuff in between.
6991  if (needFuncName) {
6992    Name = Parser.getTok().getIdentifier();
6993  }
6994
6995  // Mark symbol as a thumb symbol.
6996  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6997  getParser().getStreamer().EmitThumbFunc(Func);
6998  return false;
6999}
7000
7001/// parseDirectiveSyntax
7002///  ::= .syntax unified | divided
7003bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7004  const AsmToken &Tok = Parser.getTok();
7005  if (Tok.isNot(AsmToken::Identifier))
7006    return Error(L, "unexpected token in .syntax directive");
7007  StringRef Mode = Tok.getString();
7008  if (Mode == "unified" || Mode == "UNIFIED")
7009    Parser.Lex();
7010  else if (Mode == "divided" || Mode == "DIVIDED")
7011    return Error(L, "'.syntax divided' ARM assembly not supported");
7012  else
7013    return Error(L, "unrecognized syntax mode in .syntax directive");
7014
7015  if (getLexer().isNot(AsmToken::EndOfStatement))
7016    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7017  Parser.Lex();
7018
7019  // TODO tell the MC streamer the mode
7020  // getParser().getStreamer().Emit???();
7021  return false;
7022}
7023
7024/// parseDirectiveCode
7025///  ::= .code 16 | 32
7026bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7027  const AsmToken &Tok = Parser.getTok();
7028  if (Tok.isNot(AsmToken::Integer))
7029    return Error(L, "unexpected token in .code directive");
7030  int64_t Val = Parser.getTok().getIntVal();
7031  if (Val == 16)
7032    Parser.Lex();
7033  else if (Val == 32)
7034    Parser.Lex();
7035  else
7036    return Error(L, "invalid operand to .code directive");
7037
7038  if (getLexer().isNot(AsmToken::EndOfStatement))
7039    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7040  Parser.Lex();
7041
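  // Switch the parser's ISA mode if needed and tell the streamer which mode
  // the following code is in.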
7042  if (Val == 16) {
7043    if (!isThumb())
7044      SwitchMode();
7045    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7046  } else {
7047    if (isThumb())
7048      SwitchMode();
7049    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7050  }
7051
7052  return false;
7053}
7054
7055/// parseDirectiveReq
7056///  ::= name .req registername
7057bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7058  Parser.Lex(); // Eat the '.req' token.
7059  unsigned Reg;
7060  SMLoc SRegLoc, ERegLoc;
7061  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7062    Parser.EatToEndOfStatement();
7063    return Error(SRegLoc, "register name expected");
7064  }
7065
7066  // Shouldn't be anything else.
7067  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7068    Parser.EatToEndOfStatement();
7069    return Error(Parser.getTok().getLoc(),
7070                 "unexpected input in .req directive.");
7071  }
7072
7073  Parser.Lex(); // Consume the EndOfStatement
7074
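  // Re-defining a name to the same register is harmless; only a mismatch
  // is an error.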
7075  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7076    return Error(SRegLoc, "redefinition of '" + Name +
7077                          "' does not match original.");
7078
7079  return false;
7080}
7081
7082/// parseDirectiveUnreq
7083///  ::= .unreq registername
7084bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7085  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7086    Parser.EatToEndOfStatement();
7087    return Error(L, "unexpected input in .unreq directive.");
7088  }
7089  RegisterReqs.erase(Parser.getTok().getIdentifier());
7090  Parser.Lex(); // Eat the identifier.
7091  return false;
7092}
7093
7094/// parseDirectiveArch
7095///  ::= .arch token
7096bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
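  // TODO: parsing of the .arch directive is not implemented yet.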
7097  return true;
7098}
7099
7100/// parseDirectiveEabiAttr
7101///  ::= .eabi_attribute int, int
7102bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
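  // TODO: parsing of the .eabi_attribute directive is not implemented yet.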
7103  return true;
7104}
7105
7106extern "C" void LLVMInitializeARMAsmLexer();
7107
7108/// Force static initialization.
7109extern "C" void LLVMInitializeARMAsmParser() {
7110  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7111  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7112  LLVMInitializeARMAsmLexer();
7113}
7114
7115#define GET_REGISTER_MATCHER
7116#define GET_MATCHER_IMPLEMENTATION
7117#include "ARMGenAsmMatcher.inc"
7118