ARMAsmParser.cpp revision ad353c630359d285018a250d72c80b7022d8e67e
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases registered via the .req directive.
50  StringMap<unsigned> RegisterReqs;
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
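                                //   e.g. a mask of 0b1000 has three trailing
                                //   zeros (one instruction); 0b0100 has two
                                //   (two instructions).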
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // first instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U;}
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
86  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
87
88  int tryParseRegister();
89  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
90  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
93  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
94  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
95  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
96                              unsigned &ShiftAmount);
97  bool parseDirectiveWord(unsigned Size, SMLoc L);
98  bool parseDirectiveThumb(SMLoc L);
99  bool parseDirectiveARM(SMLoc L);
100  bool parseDirectiveThumbFunc(SMLoc L);
101  bool parseDirectiveCode(SMLoc L);
102  bool parseDirectiveSyntax(SMLoc L);
103  bool parseDirectiveReq(StringRef Name, SMLoc L);
104  bool parseDirectiveUnreq(SMLoc L);
105  bool parseDirectiveArch(SMLoc L);
106  bool parseDirectiveEabiAttr(SMLoc L);
107
108  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
109                          bool &CarrySetting, unsigned &ProcessorIMod,
110                          StringRef &ITMask);
111  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
112                             bool &CanAcceptPredicationCode);
113
114  bool isThumb() const {
115    // FIXME: Can tablegen auto-generate this?
116    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
117  }
118  bool isThumbOne() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
120  }
121  bool isThumbTwo() const {
122    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
123  }
124  bool hasV6Ops() const {
125    return STI.getFeatureBits() & ARM::HasV6Ops;
126  }
127  bool hasV7Ops() const {
128    return STI.getFeatureBits() & ARM::HasV7Ops;
129  }
130  void SwitchMode() {
131    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
132    setAvailableFeatures(FB);
133  }
134  bool isMClass() const {
135    return STI.getFeatureBits() & ARM::FeatureMClass;
136  }
137
138  /// @name Auto-generated Match Functions
139  /// {
140
141#define GET_ASSEMBLER_HEADER
142#include "ARMGenAsmMatcher.inc"
143
144  /// }
145
146  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
147  OperandMatchResultTy parseCoprocNumOperand(
148    SmallVectorImpl<MCParsedAsmOperand*>&);
149  OperandMatchResultTy parseCoprocRegOperand(
150    SmallVectorImpl<MCParsedAsmOperand*>&);
151  OperandMatchResultTy parseCoprocOptionOperand(
152    SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseMemBarrierOptOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseProcIFlagsOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseMSRMaskOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
160                                   StringRef Op, int Low, int High);
161  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "lsl", 0, 31);
163  }
164  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
165    return parsePKHImm(O, "asr", 1, 32);
166  }
167  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
176
177  // Asm Match Converter Methods
178  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
179                    const SmallVectorImpl<MCParsedAsmOperand*> &);
180  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
181                    const SmallVectorImpl<MCParsedAsmOperand*> &);
182  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
183                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
184  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
185                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
187                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
189                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
197                             const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
199                             const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
201                             const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
203                             const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
205                  const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
207                  const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
209                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
211                        const SmallVectorImpl<MCParsedAsmOperand*> &);
212  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
213                     const SmallVectorImpl<MCParsedAsmOperand*> &);
214  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
215                        const SmallVectorImpl<MCParsedAsmOperand*> &);
216  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
217                     const SmallVectorImpl<MCParsedAsmOperand*> &);
218  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
219                        const SmallVectorImpl<MCParsedAsmOperand*> &);
220
221  bool validateInstruction(MCInst &Inst,
222                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
223  bool processInstruction(MCInst &Inst,
224                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
225  bool shouldOmitCCOutOperand(StringRef Mnemonic,
226                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
227
228public:
229  enum ARMMatchResultTy {
230    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
231    Match_RequiresNotITBlock,
232    Match_RequiresV6,
233    Match_RequiresThumb2
234  };
235
236  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
237    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
238    MCAsmParserExtension::Initialize(_Parser);
239
240    // Cache the MCRegisterInfo.
241    MRI = &getContext().getRegisterInfo();
242
243    // Initialize the set of available features.
244    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
245
246    // Not in an ITBlock to start with.
247    ITState.CurPosition = ~0U;
248  }
249
250  // Implementation of the MCTargetAsmParser interface:
251  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
252  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
253                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
254  bool ParseDirective(AsmToken DirectiveID);
255
256  unsigned checkTargetMatchPredicate(MCInst &Inst);
257
258  bool MatchAndEmitInstruction(SMLoc IDLoc,
259                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
260                               MCStreamer &Out);
261};
262} // end anonymous namespace
263
264namespace {
265
266/// ARMOperand - Instances of this class represent a parsed ARM machine
267/// instruction.
268class ARMOperand : public MCParsedAsmOperand {
269  enum KindTy {
270    k_CondCode,
271    k_CCOut,
272    k_ITCondMask,
273    k_CoprocNum,
274    k_CoprocReg,
275    k_CoprocOption,
276    k_Immediate,
277    k_MemBarrierOpt,
278    k_Memory,
279    k_PostIndexRegister,
280    k_MSRMask,
281    k_ProcIFlags,
282    k_VectorIndex,
283    k_Register,
284    k_RegisterList,
285    k_DPRRegisterList,
286    k_SPRRegisterList,
287    k_VectorList,
288    k_VectorListAllLanes,
289    k_VectorListIndexed,
290    k_ShiftedRegister,
291    k_ShiftedImmediate,
292    k_ShifterImmediate,
293    k_RotateImmediate,
294    k_BitfieldDescriptor,
295    k_Token
296  } Kind;
297
298  SMLoc StartLoc, EndLoc;
299  SmallVector<unsigned, 8> Registers;
300
301  union {
302    struct {
303      ARMCC::CondCodes Val;
304    } CC;
305
306    struct {
307      unsigned Val;
308    } Cop;
309
310    struct {
311      unsigned Val;
312    } CoprocOption;
313
314    struct {
315      unsigned Mask:4;
316    } ITMask;
317
318    struct {
319      ARM_MB::MemBOpt Val;
320    } MBOpt;
321
322    struct {
323      ARM_PROC::IFlags Val;
324    } IFlags;
325
326    struct {
327      unsigned Val;
328    } MMask;
329
330    struct {
331      const char *Data;
332      unsigned Length;
333    } Tok;
334
335    struct {
336      unsigned RegNum;
337    } Reg;
338
339    // A vector register list is a sequential list of 1 to 4 registers.
340    struct {
341      unsigned RegNum;
342      unsigned Count;
343      unsigned LaneIndex;
344      bool isDoubleSpaced;
345    } VectorList;
346
347    struct {
348      unsigned Val;
349    } VectorIndex;
350
351    struct {
352      const MCExpr *Val;
353    } Imm;
354
355    /// Combined record for all forms of ARM address expressions.
356    struct {
357      unsigned BaseRegNum;
358      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
359      // was specified.
360      const MCConstantExpr *OffsetImm;  // Offset immediate value
361      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
362      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
363      unsigned ShiftImm;        // shift for OffsetReg.
364      unsigned Alignment;       // 0 = no alignment specified
365                                // n = alignment in bytes (2, 4, 8, 16, or 32)
366      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
367    } Memory;
368
369    struct {
370      unsigned RegNum;
371      bool isAdd;
372      ARM_AM::ShiftOpc ShiftTy;
373      unsigned ShiftImm;
374    } PostIdxReg;
375
376    struct {
377      bool isASR;
378      unsigned Imm;
379    } ShifterImm;
380    struct {
381      ARM_AM::ShiftOpc ShiftTy;
382      unsigned SrcReg;
383      unsigned ShiftReg;
384      unsigned ShiftImm;
385    } RegShiftedReg;
386    struct {
387      ARM_AM::ShiftOpc ShiftTy;
388      unsigned SrcReg;
389      unsigned ShiftImm;
390    } RegShiftedImm;
391    struct {
392      unsigned Imm;
393    } RotImm;
394    struct {
395      unsigned LSB;
396      unsigned Width;
397    } Bitfield;
398  };
399
400  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
401public:
402  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
403    Kind = o.Kind;
404    StartLoc = o.StartLoc;
405    EndLoc = o.EndLoc;
406    switch (Kind) {
407    case k_CondCode:
408      CC = o.CC;
409      break;
410    case k_ITCondMask:
411      ITMask = o.ITMask;
412      break;
413    case k_Token:
414      Tok = o.Tok;
415      break;
416    case k_CCOut:
417    case k_Register:
418      Reg = o.Reg;
419      break;
420    case k_RegisterList:
421    case k_DPRRegisterList:
422    case k_SPRRegisterList:
423      Registers = o.Registers;
424      break;
425    case k_VectorList:
426    case k_VectorListAllLanes:
427    case k_VectorListIndexed:
428      VectorList = o.VectorList;
429      break;
430    case k_CoprocNum:
431    case k_CoprocReg:
432      Cop = o.Cop;
433      break;
434    case k_CoprocOption:
435      CoprocOption = o.CoprocOption;
436      break;
437    case k_Immediate:
438      Imm = o.Imm;
439      break;
440    case k_MemBarrierOpt:
441      MBOpt = o.MBOpt;
442      break;
443    case k_Memory:
444      Memory = o.Memory;
445      break;
446    case k_PostIndexRegister:
447      PostIdxReg = o.PostIdxReg;
448      break;
449    case k_MSRMask:
450      MMask = o.MMask;
451      break;
452    case k_ProcIFlags:
453      IFlags = o.IFlags;
454      break;
455    case k_ShifterImmediate:
456      ShifterImm = o.ShifterImm;
457      break;
458    case k_ShiftedRegister:
459      RegShiftedReg = o.RegShiftedReg;
460      break;
461    case k_ShiftedImmediate:
462      RegShiftedImm = o.RegShiftedImm;
463      break;
464    case k_RotateImmediate:
465      RotImm = o.RotImm;
466      break;
467    case k_BitfieldDescriptor:
468      Bitfield = o.Bitfield;
469      break;
470    case k_VectorIndex:
471      VectorIndex = o.VectorIndex;
472      break;
473    }
474  }
475
476  /// getStartLoc - Get the location of the first token of this operand.
477  SMLoc getStartLoc() const { return StartLoc; }
478  /// getEndLoc - Get the location of the last token of this operand.
479  SMLoc getEndLoc() const { return EndLoc; }
480
481  ARMCC::CondCodes getCondCode() const {
482    assert(Kind == k_CondCode && "Invalid access!");
483    return CC.Val;
484  }
485
486  unsigned getCoproc() const {
487    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
488    return Cop.Val;
489  }
490
491  StringRef getToken() const {
492    assert(Kind == k_Token && "Invalid access!");
493    return StringRef(Tok.Data, Tok.Length);
494  }
495
496  unsigned getReg() const {
497    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
498    return Reg.RegNum;
499  }
500
501  const SmallVectorImpl<unsigned> &getRegList() const {
502    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
503            Kind == k_SPRRegisterList) && "Invalid access!");
504    return Registers;
505  }
506
507  const MCExpr *getImm() const {
508    assert(isImm() && "Invalid access!");
509    return Imm.Val;
510  }
511
512  unsigned getVectorIndex() const {
513    assert(Kind == k_VectorIndex && "Invalid access!");
514    return VectorIndex.Val;
515  }
516
517  ARM_MB::MemBOpt getMemBarrierOpt() const {
518    assert(Kind == k_MemBarrierOpt && "Invalid access!");
519    return MBOpt.Val;
520  }
521
522  ARM_PROC::IFlags getProcIFlags() const {
523    assert(Kind == k_ProcIFlags && "Invalid access!");
524    return IFlags.Val;
525  }
526
527  unsigned getMSRMask() const {
528    assert(Kind == k_MSRMask && "Invalid access!");
529    return MMask.Val;
530  }
531
532  bool isCoprocNum() const { return Kind == k_CoprocNum; }
533  bool isCoprocReg() const { return Kind == k_CoprocReg; }
534  bool isCoprocOption() const { return Kind == k_CoprocOption; }
535  bool isCondCode() const { return Kind == k_CondCode; }
536  bool isCCOut() const { return Kind == k_CCOut; }
537  bool isITMask() const { return Kind == k_ITCondMask; }
538  bool isITCondCode() const { return Kind == k_CondCode; }
539  bool isImm() const { return Kind == k_Immediate; }
540  bool isFPImm() const {
541    if (!isImm()) return false;
542    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
543    if (!CE) return false;
544    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
545    return Val != -1;
546  }
547  bool isFBits16() const {
548    if (!isImm()) return false;
549    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
550    if (!CE) return false;
551    int64_t Value = CE->getValue();
552    return Value >= 0 && Value <= 16;
553  }
554  bool isFBits32() const {
555    if (!isImm()) return false;
556    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
557    if (!CE) return false;
558    int64_t Value = CE->getValue();
559    return Value >= 1 && Value <= 32;
560  }
561  bool isImm8s4() const {
562    if (!isImm()) return false;
563    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
564    if (!CE) return false;
565    int64_t Value = CE->getValue();
566    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
567  }
568  bool isImm0_1020s4() const {
569    if (!isImm()) return false;
570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
571    if (!CE) return false;
572    int64_t Value = CE->getValue();
573    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
574  }
575  bool isImm0_508s4() const {
576    if (!isImm()) return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
581  }
582  bool isImm0_255() const {
583    if (!isImm()) return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = CE->getValue();
587    return Value >= 0 && Value < 256;
588  }
589  bool isImm0_1() const {
590    if (!isImm()) return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return Value >= 0 && Value < 2;
595  }
596  bool isImm0_3() const {
597    if (!isImm()) return false;
598    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
599    if (!CE) return false;
600    int64_t Value = CE->getValue();
601    return Value >= 0 && Value < 4;
602  }
603  bool isImm0_7() const {
604    if (!isImm()) return false;
605    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
606    if (!CE) return false;
607    int64_t Value = CE->getValue();
608    return Value >= 0 && Value < 8;
609  }
610  bool isImm0_15() const {
611    if (!isImm()) return false;
612    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
613    if (!CE) return false;
614    int64_t Value = CE->getValue();
615    return Value >= 0 && Value < 16;
616  }
617  bool isImm0_31() const {
618    if (!isImm()) return false;
619    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
620    if (!CE) return false;
621    int64_t Value = CE->getValue();
622    return Value >= 0 && Value < 32;
623  }
624  bool isImm0_63() const {
625    if (!isImm()) return false;
626    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
627    if (!CE) return false;
628    int64_t Value = CE->getValue();
629    return Value >= 0 && Value < 64;
630  }
631  bool isImm8() const {
632    if (!isImm()) return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 8;
637  }
638  bool isImm16() const {
639    if (!isImm()) return false;
640    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
641    if (!CE) return false;
642    int64_t Value = CE->getValue();
643    return Value == 16;
644  }
645  bool isImm32() const {
646    if (!isImm()) return false;
647    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
648    if (!CE) return false;
649    int64_t Value = CE->getValue();
650    return Value == 32;
651  }
652  bool isShrImm8() const {
653    if (!isImm()) return false;
654    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
655    if (!CE) return false;
656    int64_t Value = CE->getValue();
657    return Value > 0 && Value <= 8;
658  }
659  bool isShrImm16() const {
660    if (!isImm()) return false;
661    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
662    if (!CE) return false;
663    int64_t Value = CE->getValue();
664    return Value > 0 && Value <= 16;
665  }
666  bool isShrImm32() const {
667    if (!isImm()) return false;
668    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
669    if (!CE) return false;
670    int64_t Value = CE->getValue();
671    return Value > 0 && Value <= 32;
672  }
673  bool isShrImm64() const {
674    if (!isImm()) return false;
675    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
676    if (!CE) return false;
677    int64_t Value = CE->getValue();
678    return Value > 0 && Value <= 64;
679  }
680  bool isImm1_7() const {
681    if (!isImm()) return false;
682    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683    if (!CE) return false;
684    int64_t Value = CE->getValue();
685    return Value > 0 && Value < 8;
686  }
687  bool isImm1_15() const {
688    if (!isImm()) return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value > 0 && Value < 16;
693  }
694  bool isImm1_31() const {
695    if (!isImm()) return false;
696    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
697    if (!CE) return false;
698    int64_t Value = CE->getValue();
699    return Value > 0 && Value < 32;
700  }
701  bool isImm1_16() const {
702    if (!isImm()) return false;
703    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704    if (!CE) return false;
705    int64_t Value = CE->getValue();
706    return Value > 0 && Value < 17;
707  }
708  bool isImm1_32() const {
709    if (!isImm()) return false;
710    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
711    if (!CE) return false;
712    int64_t Value = CE->getValue();
713    return Value > 0 && Value < 33;
714  }
715  bool isImm0_32() const {
716    if (!isImm()) return false;
717    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
718    if (!CE) return false;
719    int64_t Value = CE->getValue();
720    return Value >= 0 && Value < 33;
721  }
722  bool isImm0_65535() const {
723    if (!isImm()) return false;
724    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
725    if (!CE) return false;
726    int64_t Value = CE->getValue();
727    return Value >= 0 && Value < 65536;
728  }
729  bool isImm0_65535Expr() const {
730    if (!isImm()) return false;
731    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
732    // If it's not a constant expression, it'll generate a fixup and be
733    // handled later.
734    if (!CE) return true;
735    int64_t Value = CE->getValue();
736    return Value >= 0 && Value < 65536;
737  }
738  bool isImm24bit() const {
739    if (!isImm()) return false;
740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
741    if (!CE) return false;
742    int64_t Value = CE->getValue();
743    return Value >= 0 && Value <= 0xffffff;
744  }
745  bool isImmThumbSR() const {
746    if (!isImm()) return false;
747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748    if (!CE) return false;
749    int64_t Value = CE->getValue();
750    return Value > 0 && Value < 33;
751  }
752  bool isPKHLSLImm() const {
753    if (!isImm()) return false;
754    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755    if (!CE) return false;
756    int64_t Value = CE->getValue();
757    return Value >= 0 && Value < 32;
758  }
759  bool isPKHASRImm() const {
760    if (!isImm()) return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    if (!CE) return false;
763    int64_t Value = CE->getValue();
764    return Value > 0 && Value <= 32;
765  }
766  bool isARMSOImm() const {
767    if (!isImm()) return false;
768    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
769    if (!CE) return false;
770    int64_t Value = CE->getValue();
771    return ARM_AM::getSOImmVal(Value) != -1;
772  }
773  bool isARMSOImmNot() const {
774    if (!isImm()) return false;
775    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
776    if (!CE) return false;
777    int64_t Value = CE->getValue();
778    return ARM_AM::getSOImmVal(~Value) != -1;
779  }
780  bool isARMSOImmNeg() const {
781    if (!isImm()) return false;
782    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783    if (!CE) return false;
784    int64_t Value = CE->getValue();
785    // Only use this when not representable as a plain so_imm.
786    return ARM_AM::getSOImmVal(Value) == -1 &&
787      ARM_AM::getSOImmVal(-Value) != -1;
788  }
789  bool isT2SOImm() const {
790    if (!isImm()) return false;
791    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
792    if (!CE) return false;
793    int64_t Value = CE->getValue();
794    return ARM_AM::getT2SOImmVal(Value) != -1;
795  }
796  bool isT2SOImmNot() const {
797    if (!isImm()) return false;
798    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
799    if (!CE) return false;
800    int64_t Value = CE->getValue();
801    return ARM_AM::getT2SOImmVal(~Value) != -1;
802  }
803  bool isT2SOImmNeg() const {
804    if (!isImm()) return false;
805    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
806    if (!CE) return false;
807    int64_t Value = CE->getValue();
808    // Only use this when not representable as a plain so_imm.
809    return ARM_AM::getT2SOImmVal(Value) == -1 &&
810      ARM_AM::getT2SOImmVal(-Value) != -1;
811  }
812  bool isSetEndImm() const {
813    if (!isImm()) return false;
814    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
815    if (!CE) return false;
816    int64_t Value = CE->getValue();
817    return Value == 1 || Value == 0;
818  }
819  bool isReg() const { return Kind == k_Register; }
820  bool isRegList() const { return Kind == k_RegisterList; }
821  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
822  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
823  bool isToken() const { return Kind == k_Token; }
824  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
825  bool isMemory() const { return Kind == k_Memory; }
826  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
827  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
828  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
829  bool isRotImm() const { return Kind == k_RotateImmediate; }
830  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
831  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
832  bool isPostIdxReg() const {
833    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
834  }
835  bool isMemNoOffset(bool alignOK = false) const {
836    if (!isMemory())
837      return false;
838    // No offset of any kind.
839    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
840     (alignOK || Memory.Alignment == 0);
841  }
842  bool isMemPCRelImm12() const {
843    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
844      return false;
845    // Base register must be PC.
846    if (Memory.BaseRegNum != ARM::PC)
847      return false;
848    // Immediate offset in range [-4095, 4095].
849    if (!Memory.OffsetImm) return true;
850    int64_t Val = Memory.OffsetImm->getValue();
851    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
852  }
853  bool isAlignedMemory() const {
854    return isMemNoOffset(true);
855  }
856  bool isAddrMode2() const {
857    if (!isMemory() || Memory.Alignment != 0) return false;
858    // Check for register offset.
859    if (Memory.OffsetRegNum) return true;
860    // Immediate offset in range [-4095, 4095].
861    if (!Memory.OffsetImm) return true;
862    int64_t Val = Memory.OffsetImm->getValue();
863    return Val > -4096 && Val < 4096;
864  }
865  bool isAM2OffsetImm() const {
866    if (!isImm()) return false;
867    // Immediate offset in range [-4095, 4095].
868    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
869    if (!CE) return false;
870    int64_t Val = CE->getValue();
871    return Val > -4096 && Val < 4096;
872  }
873  bool isAddrMode3() const {
874    // If we have an immediate that's not a constant, treat it as a label
875    // reference needing a fixup. If it is a constant, it's something else
876    // and we reject it.
877    if (isImm() && !isa<MCConstantExpr>(getImm()))
878      return true;
879    if (!isMemory() || Memory.Alignment != 0) return false;
880    // No shifts are legal for AM3.
881    if (Memory.ShiftType != ARM_AM::no_shift) return false;
882    // Check for register offset.
883    if (Memory.OffsetRegNum) return true;
884    // Immediate offset in range [-255, 255].
885    if (!Memory.OffsetImm) return true;
886    int64_t Val = Memory.OffsetImm->getValue();
887    return Val > -256 && Val < 256;
888  }
889  bool isAM3Offset() const {
890    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
891      return false;
892    if (Kind == k_PostIndexRegister)
893      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
894    // Immediate offset in range [-255, 255].
895    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
896    if (!CE) return false;
897    int64_t Val = CE->getValue();
898    // Special case, #-0 is INT32_MIN.
899    return (Val > -256 && Val < 256) || Val == INT32_MIN;
900  }
901  bool isAddrMode5() const {
902    // If we have an immediate that's not a constant, treat it as a label
903    // reference needing a fixup. If it is a constant, it's something else
904    // and we reject it.
905    if (isImm() && !isa<MCConstantExpr>(getImm()))
906      return true;
907    if (!isMemory() || Memory.Alignment != 0) return false;
908    // Check for register offset.
909    if (Memory.OffsetRegNum) return false;
910    // Immediate offset in range [-1020, 1020] and a multiple of 4.
911    if (!Memory.OffsetImm) return true;
912    int64_t Val = Memory.OffsetImm->getValue();
913    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
914      Val == INT32_MIN;
915  }
916  bool isMemTBB() const {
917    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
918        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
919      return false;
920    return true;
921  }
922  bool isMemTBH() const {
923    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
924        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
925        Memory.Alignment != 0 )
926      return false;
927    return true;
928  }
929  bool isMemRegOffset() const {
930    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
931      return false;
932    return true;
933  }
934  bool isT2MemRegOffset() const {
935    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
936        Memory.Alignment != 0)
937      return false;
938    // Only lsl #{0, 1, 2, 3} allowed.
939    if (Memory.ShiftType == ARM_AM::no_shift)
940      return true;
941    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
942      return false;
943    return true;
944  }
945  bool isMemThumbRR() const {
946    // Thumb reg+reg addressing is simple. Just two registers, a base and
947    // an offset. No shifts, negations or any other complicating factors.
948    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
949        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
950      return false;
951    return isARMLowRegister(Memory.BaseRegNum) &&
952      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
953  }
954  bool isMemThumbRIs4() const {
955    if (!isMemory() || Memory.OffsetRegNum != 0 ||
956        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
957      return false;
958    // Immediate offset, multiple of 4 in range [0, 124].
959    if (!Memory.OffsetImm) return true;
960    int64_t Val = Memory.OffsetImm->getValue();
961    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
962  }
963  bool isMemThumbRIs2() const {
964    if (!isMemory() || Memory.OffsetRegNum != 0 ||
965        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
966      return false;
967    // Immediate offset, multiple of 2 in range [0, 62].
968    if (!Memory.OffsetImm) return true;
969    int64_t Val = Memory.OffsetImm->getValue();
970    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
971  }
972  bool isMemThumbRIs1() const {
973    if (!isMemory() || Memory.OffsetRegNum != 0 ||
974        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
975      return false;
976    // Immediate offset in range [0, 31].
977    if (!Memory.OffsetImm) return true;
978    int64_t Val = Memory.OffsetImm->getValue();
979    return Val >= 0 && Val <= 31;
980  }
981  bool isMemThumbSPI() const {
982    if (!isMemory() || Memory.OffsetRegNum != 0 ||
983        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
984      return false;
985    // Immediate offset, multiple of 4 in range [0, 1020].
986    if (!Memory.OffsetImm) return true;
987    int64_t Val = Memory.OffsetImm->getValue();
988    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
989  }
990  bool isMemImm8s4Offset() const {
991    // If we have an immediate that's not a constant, treat it as a label
992    // reference needing a fixup. If it is a constant, it's something else
993    // and we reject it.
994    if (isImm() && !isa<MCConstantExpr>(getImm()))
995      return true;
996    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
997      return false;
998    // Immediate offset a multiple of 4 in range [-1020, 1020].
999    if (!Memory.OffsetImm) return true;
1000    int64_t Val = Memory.OffsetImm->getValue();
1001    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1002  }
1003  bool isMemImm0_1020s4Offset() const {
1004    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1005      return false;
1006    // Immediate offset a multiple of 4 in range [0, 1020].
1007    if (!Memory.OffsetImm) return true;
1008    int64_t Val = Memory.OffsetImm->getValue();
1009    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1010  }
1011  bool isMemImm8Offset() const {
1012    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1013      return false;
1014    // Base reg of PC isn't allowed for these encodings.
1015    if (Memory.BaseRegNum == ARM::PC) return false;
1016    // Immediate offset in range [-255, 255].
1017    if (!Memory.OffsetImm) return true;
1018    int64_t Val = Memory.OffsetImm->getValue();
1019    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1020  }
1021  bool isMemPosImm8Offset() const {
1022    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1023      return false;
1024    // Immediate offset in range [0, 255].
1025    if (!Memory.OffsetImm) return true;
1026    int64_t Val = Memory.OffsetImm->getValue();
1027    return Val >= 0 && Val < 256;
1028  }
1029  bool isMemNegImm8Offset() const {
1030    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1031      return false;
1032    // Base reg of PC isn't allowed for these encodings.
1033    if (Memory.BaseRegNum == ARM::PC) return false;
1034    // Immediate offset in range [-255, -1].
1035    if (!Memory.OffsetImm) return false;
1036    int64_t Val = Memory.OffsetImm->getValue();
1037    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1038  }
1039  bool isMemUImm12Offset() const {
1040    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1041      return false;
1042    // Immediate offset in range [0, 4095].
1043    if (!Memory.OffsetImm) return true;
1044    int64_t Val = Memory.OffsetImm->getValue();
1045    return (Val >= 0 && Val < 4096);
1046  }
1047  bool isMemImm12Offset() const {
1048    // If we have an immediate that's not a constant, treat it as a label
1049    // reference needing a fixup. If it is a constant, it's something else
1050    // and we reject it.
1051    if (isImm() && !isa<MCConstantExpr>(getImm()))
1052      return true;
1053
1054    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1055      return false;
1056    // Immediate offset in range [-4095, 4095].
1057    if (!Memory.OffsetImm) return true;
1058    int64_t Val = Memory.OffsetImm->getValue();
1059    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1060  }
1061  bool isPostIdxImm8() const {
1062    if (!isImm()) return false;
1063    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1064    if (!CE) return false;
1065    int64_t Val = CE->getValue();
1066    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1067  }
1068  bool isPostIdxImm8s4() const {
1069    if (!isImm()) return false;
1070    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1071    if (!CE) return false;
1072    int64_t Val = CE->getValue();
1073    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1074      (Val == INT32_MIN);
1075  }
1076
1077  bool isMSRMask() const { return Kind == k_MSRMask; }
1078  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1079
1080  // NEON operands.
1081  bool isSingleSpacedVectorList() const {
1082    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1083  }
1084  bool isDoubleSpacedVectorList() const {
1085    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1086  }
1087  bool isVecListOneD() const {
1088    if (!isSingleSpacedVectorList()) return false;
1089    return VectorList.Count == 1;
1090  }
1091
1092  bool isVecListDPair() const {
1093    if (!isSingleSpacedVectorList()) return false;
1094    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1095              .contains(VectorList.RegNum));
1096  }
1097
1098  bool isVecListThreeD() const {
1099    if (!isSingleSpacedVectorList()) return false;
1100    return VectorList.Count == 3;
1101  }
1102
1103  bool isVecListFourD() const {
1104    if (!isSingleSpacedVectorList()) return false;
1105    return VectorList.Count == 4;
1106  }
1107
1108  bool isVecListDPairSpaced() const {
1109    if (isSingleSpacedVectorList()) return false;
1110    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1111              .contains(VectorList.RegNum));
1112  }
1113
1114  bool isVecListThreeQ() const {
1115    if (!isDoubleSpacedVectorList()) return false;
1116    return VectorList.Count == 3;
1117  }
1118
1119  bool isVecListFourQ() const {
1120    if (!isDoubleSpacedVectorList()) return false;
1121    return VectorList.Count == 4;
1122  }
1123
1124  bool isSingleSpacedVectorAllLanes() const {
1125    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1126  }
1127  bool isDoubleSpacedVectorAllLanes() const {
1128    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1129  }
1130  bool isVecListOneDAllLanes() const {
1131    if (!isSingleSpacedVectorAllLanes()) return false;
1132    return VectorList.Count == 1;
1133  }
1134
1135  bool isVecListDPairAllLanes() const {
1136    if (!isSingleSpacedVectorAllLanes()) return false;
1137    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1138              .contains(VectorList.RegNum));
1139  }
1140
1141  bool isVecListDPairSpacedAllLanes() const {
1142    if (!isDoubleSpacedVectorAllLanes()) return false;
1143    return VectorList.Count == 2;
1144  }
1145
1146  bool isVecListThreeDAllLanes() const {
1147    if (!isSingleSpacedVectorAllLanes()) return false;
1148    return VectorList.Count == 3;
1149  }
1150
1151  bool isVecListThreeQAllLanes() const {
1152    if (!isDoubleSpacedVectorAllLanes()) return false;
1153    return VectorList.Count == 3;
1154  }
1155
1156  bool isVecListFourDAllLanes() const {
1157    if (!isSingleSpacedVectorAllLanes()) return false;
1158    return VectorList.Count == 4;
1159  }
1160
1161  bool isVecListFourQAllLanes() const {
1162    if (!isDoubleSpacedVectorAllLanes()) return false;
1163    return VectorList.Count == 4;
1164  }
1165
1166  bool isSingleSpacedVectorIndexed() const {
1167    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1168  }
1169  bool isDoubleSpacedVectorIndexed() const {
1170    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1171  }
1172  bool isVecListOneDByteIndexed() const {
1173    if (!isSingleSpacedVectorIndexed()) return false;
1174    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1175  }
1176
1177  bool isVecListOneDHWordIndexed() const {
1178    if (!isSingleSpacedVectorIndexed()) return false;
1179    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1180  }
1181
1182  bool isVecListOneDWordIndexed() const {
1183    if (!isSingleSpacedVectorIndexed()) return false;
1184    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1185  }
1186
1187  bool isVecListTwoDByteIndexed() const {
1188    if (!isSingleSpacedVectorIndexed()) return false;
1189    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1190  }
1191
1192  bool isVecListTwoDHWordIndexed() const {
1193    if (!isSingleSpacedVectorIndexed()) return false;
1194    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1195  }
1196
1197  bool isVecListTwoQWordIndexed() const {
1198    if (!isDoubleSpacedVectorIndexed()) return false;
1199    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1200  }
1201
1202  bool isVecListTwoQHWordIndexed() const {
1203    if (!isDoubleSpacedVectorIndexed()) return false;
1204    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1205  }
1206
1207  bool isVecListTwoDWordIndexed() const {
1208    if (!isSingleSpacedVectorIndexed()) return false;
1209    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1210  }
1211
1212  bool isVecListThreeDByteIndexed() const {
1213    if (!isSingleSpacedVectorIndexed()) return false;
1214    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1215  }
1216
1217  bool isVecListThreeDHWordIndexed() const {
1218    if (!isSingleSpacedVectorIndexed()) return false;
1219    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1220  }
1221
1222  bool isVecListThreeQWordIndexed() const {
1223    if (!isDoubleSpacedVectorIndexed()) return false;
1224    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1225  }
1226
1227  bool isVecListThreeQHWordIndexed() const {
1228    if (!isDoubleSpacedVectorIndexed()) return false;
1229    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1230  }
1231
1232  bool isVecListThreeDWordIndexed() const {
1233    if (!isSingleSpacedVectorIndexed()) return false;
1234    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1235  }
1236
1237  bool isVecListFourDByteIndexed() const {
1238    if (!isSingleSpacedVectorIndexed()) return false;
1239    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1240  }
1241
1242  bool isVecListFourDHWordIndexed() const {
1243    if (!isSingleSpacedVectorIndexed()) return false;
1244    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1245  }
1246
1247  bool isVecListFourQWordIndexed() const {
1248    if (!isDoubleSpacedVectorIndexed()) return false;
1249    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1250  }
1251
1252  bool isVecListFourQHWordIndexed() const {
1253    if (!isDoubleSpacedVectorIndexed()) return false;
1254    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1255  }
1256
1257  bool isVecListFourDWordIndexed() const {
1258    if (!isSingleSpacedVectorIndexed()) return false;
1259    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1260  }
1261
1262  bool isVectorIndex8() const {
1263    if (Kind != k_VectorIndex) return false;
1264    return VectorIndex.Val < 8;
1265  }
1266  bool isVectorIndex16() const {
1267    if (Kind != k_VectorIndex) return false;
1268    return VectorIndex.Val < 4;
1269  }
1270  bool isVectorIndex32() const {
1271    if (Kind != k_VectorIndex) return false;
1272    return VectorIndex.Val < 2;
1273  }
1274
1275  bool isNEONi8splat() const {
1276    if (!isImm()) return false;
1277    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1278    // Must be a constant.
1279    if (!CE) return false;
1280    int64_t Value = CE->getValue();
1281    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1282    // value.
1283    return Value >= 0 && Value < 256;
1284  }
1285
1286  bool isNEONi16splat() const {
1287    if (!isImm()) return false;
1288    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1289    // Must be a constant.
1290    if (!CE) return false;
1291    int64_t Value = CE->getValue();
1292    // i16 value in the range [0,255] or [0x0100, 0xff00]
1293    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1294  }
1295
1296  bool isNEONi32splat() const {
1297    if (!isImm()) return false;
1298    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1299    // Must be a constant.
1300    if (!CE) return false;
1301    int64_t Value = CE->getValue();
1302    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1303    return (Value >= 0 && Value < 256) ||
1304      (Value >= 0x0100 && Value <= 0xff00) ||
1305      (Value >= 0x010000 && Value <= 0xff0000) ||
1306      (Value >= 0x01000000 && Value <= 0xff000000);
1307  }
1308
1309  bool isNEONi32vmov() const {
1310    if (!isImm()) return false;
1311    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1312    // Must be a constant.
1313    if (!CE) return false;
1314    int64_t Value = CE->getValue();
1315    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1316    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
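        // (e.g. 0x2ff fits the 00Xf form and 0x2ffff fits the 0Xff form).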
1317    return (Value >= 0 && Value < 256) ||
1318      (Value >= 0x0100 && Value <= 0xff00) ||
1319      (Value >= 0x010000 && Value <= 0xff0000) ||
1320      (Value >= 0x01000000 && Value <= 0xff000000) ||
1321      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1322      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1323  }
1324  bool isNEONi32vmovNeg() const {
1325    if (!isImm()) return false;
1326    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1327    // Must be a constant.
1328    if (!CE) return false;
1329    int64_t Value = ~CE->getValue();
1330    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1331    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1332    return (Value >= 0 && Value < 256) ||
1333      (Value >= 0x0100 && Value <= 0xff00) ||
1334      (Value >= 0x010000 && Value <= 0xff0000) ||
1335      (Value >= 0x01000000 && Value <= 0xff000000) ||
1336      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1337      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1338  }
1339
1340  bool isNEONi64splat() const {
1341    if (!isImm()) return false;
1342    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1343    // Must be a constant.
1344    if (!CE) return false;
1345    uint64_t Value = CE->getValue();
1346    // i64 value with each byte being either 0 or 0xff.
1347    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1348      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1349    return true;
1350  }
1351
1352  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1353    // Add as immediates when possible.  Null MCExpr = 0.
1354    if (Expr == 0)
1355      Inst.addOperand(MCOperand::CreateImm(0));
1356    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1357      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1358    else
1359      Inst.addOperand(MCOperand::CreateExpr(Expr));
1360  }
1361
1362  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1363    assert(N == 2 && "Invalid number of operands!");
1364    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1365    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1366    Inst.addOperand(MCOperand::CreateReg(RegNum));
1367  }
1368
1369  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1370    assert(N == 1 && "Invalid number of operands!");
1371    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1372  }
1373
1374  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1375    assert(N == 1 && "Invalid number of operands!");
1376    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1377  }
1378
1379  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1380    assert(N == 1 && "Invalid number of operands!");
1381    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1382  }
1383
1384  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1385    assert(N == 1 && "Invalid number of operands!");
1386    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1387  }
1388
1389  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1390    assert(N == 1 && "Invalid number of operands!");
1391    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1392  }
1393
1394  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1395    assert(N == 1 && "Invalid number of operands!");
1396    Inst.addOperand(MCOperand::CreateReg(getReg()));
1397  }
1398
1399  void addRegOperands(MCInst &Inst, unsigned N) const {
1400    assert(N == 1 && "Invalid number of operands!");
1401    Inst.addOperand(MCOperand::CreateReg(getReg()));
1402  }
1403
1404  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1405    assert(N == 3 && "Invalid number of operands!");
1406    assert(isRegShiftedReg() &&
1407           "addRegShiftedRegOperands() on non RegShiftedReg!");
1408    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1409    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1410    Inst.addOperand(MCOperand::CreateImm(
1411      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1412  }
1413
1414  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1415    assert(N == 2 && "Invalid number of operands!");
1416    assert(isRegShiftedImm() &&
1417           "addRegShiftedImmOperands() on non RegShiftedImm!");
1418    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1419    Inst.addOperand(MCOperand::CreateImm(
1420      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1421  }
1422
1423  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1424    assert(N == 1 && "Invalid number of operands!");
1425    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1426                                         ShifterImm.Imm));
1427  }
1428
1429  void addRegListOperands(MCInst &Inst, unsigned N) const {
1430    assert(N == 1 && "Invalid number of operands!");
1431    const SmallVectorImpl<unsigned> &RegList = getRegList();
1432    for (SmallVectorImpl<unsigned>::const_iterator
1433           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1434      Inst.addOperand(MCOperand::CreateReg(*I));
1435  }
1436
1437  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1438    addRegListOperands(Inst, N);
1439  }
1440
1441  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1442    addRegListOperands(Inst, N);
1443  }
1444
1445  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1446    assert(N == 1 && "Invalid number of operands!");
1447    // Encoded as val>>3. The printer handles display as 8, 16, 24.
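        // (e.g. a rotation of 24 is emitted as 24 >> 3 == 3).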
1448    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1449  }
1450
1451  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1452    assert(N == 1 && "Invalid number of operands!");
1453    // Munge the lsb/width into a bitfield mask.
1454    unsigned lsb = Bitfield.LSB;
1455    unsigned width = Bitfield.Width;
1456    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
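        // For example, lsb == 8 and width == 4 yield 0xfffff0ff (bits [11:8]
        // clear).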
1457    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1458                      (32 - (lsb + width)));
1459    Inst.addOperand(MCOperand::CreateImm(Mask));
1460  }
1461
1462  void addImmOperands(MCInst &Inst, unsigned N) const {
1463    assert(N == 1 && "Invalid number of operands!");
1464    addExpr(Inst, getImm());
1465  }
1466
1467  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1468    assert(N == 1 && "Invalid number of operands!");
1469    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1470    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1471  }
1472
1473  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1474    assert(N == 1 && "Invalid number of operands!");
1475    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1476    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1477  }
1478
1479  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1480    assert(N == 1 && "Invalid number of operands!");
1481    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1482    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1483    Inst.addOperand(MCOperand::CreateImm(Val));
1484  }
1485
1486  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1487    assert(N == 1 && "Invalid number of operands!");
1488    // FIXME: We really want to scale the value here, but the LDRD/STRD
1489    // instructions don't encode operands that way yet.
1490    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1491    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1492  }
1493
1494  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1495    assert(N == 1 && "Invalid number of operands!");
1496    // The immediate is scaled by four in the encoding and is stored
1497    // in the MCInst as such. Lop off the low two bits here.
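    // e.g. an assembly operand of #1020 is stored as 255.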
1498    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1499    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1500  }
1501
1502  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1503    assert(N == 1 && "Invalid number of operands!");
1504    // The immediate is scaled by four in the encoding and is stored
1505    // in the MCInst as such. Lop off the low two bits here.
1506    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1507    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1508  }
1509
1510  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1511    assert(N == 1 && "Invalid number of operands!");
1512    // The constant encodes as the immediate-1, and we store in the instruction
1513    // the bits as encoded, so subtract off one here.
1514    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1515    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1516  }
1517
1518  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1519    assert(N == 1 && "Invalid number of operands!");
1520    // The constant encodes as the immediate-1, and we store in the instruction
1521    // the bits as encoded, so subtract off one here.
1522    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1523    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1524  }
1525
1526  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1527    assert(N == 1 && "Invalid number of operands!");
1528    // The constant encodes as the immediate, except for 32, which encodes as
1529    // zero.
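    // e.g. a shift amount of 32 is stored as 0; amounts 1-31 are stored unchanged.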
1530    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1531    unsigned Imm = CE->getValue();
1532    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1533  }
1534
1535  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1536    assert(N == 1 && "Invalid number of operands!");
1537    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1538    // the instruction as well.
1539    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1540    int Val = CE->getValue();
1541    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1542  }
1543
1544  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1545    assert(N == 1 && "Invalid number of operands!");
1546    // The operand is actually a t2_so_imm, but we have its bitwise
1547    // negation in the assembly source, so twiddle it here.
1548    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1549    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1550  }
1551
1552  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1553    assert(N == 1 && "Invalid number of operands!");
1554    // The operand is actually a t2_so_imm, but we have its
1555    // negation in the assembly source, so twiddle it here.
1556    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1557    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1558  }
1559
1560  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1561    assert(N == 1 && "Invalid number of operands!");
1562    // The operand is actually a so_imm, but we have its bitwise
1563    // negation in the assembly source, so twiddle it here.
1564    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1565    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1566  }
1567
1568  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1569    assert(N == 1 && "Invalid number of operands!");
1570    // The operand is actually a so_imm, but we have its
1571    // negation in the assembly source, so twiddle it here.
1572    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1573    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1574  }
1575
1576  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1577    assert(N == 1 && "Invalid number of operands!");
1578    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1579  }
1580
1581  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1582    assert(N == 1 && "Invalid number of operands!");
1583    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1584  }
1585
1586  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1587    assert(N == 1 && "Invalid number of operands!");
1588    int32_t Imm = Memory.OffsetImm->getValue();
1589    // FIXME: Handle #-0
1590    if (Imm == INT32_MIN) Imm = 0;
1591    Inst.addOperand(MCOperand::CreateImm(Imm));
1592  }
1593
1594  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1595    assert(N == 2 && "Invalid number of operands!");
1596    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1597    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1598  }
1599
1600  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1601    assert(N == 3 && "Invalid number of operands!");
1602    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1603    if (!Memory.OffsetRegNum) {
1604      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1605      // Special case for #-0
1606      if (Val == INT32_MIN) Val = 0;
1607      if (Val < 0) Val = -Val;
1608      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1609    } else {
1610      // For register offset, we encode the shift type and negation flag
1611      // here.
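      // e.g. '[r0, -r1, lsl #2]' packs {sub, #2, lsl} into a single AM2 opcode value.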
1612      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1613                              Memory.ShiftImm, Memory.ShiftType);
1614    }
1615    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1616    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1617    Inst.addOperand(MCOperand::CreateImm(Val));
1618  }
1619
1620  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1621    assert(N == 2 && "Invalid number of operands!");
1622    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1623    assert(CE && "non-constant AM2OffsetImm operand!");
1624    int32_t Val = CE->getValue();
1625    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1626    // Special case for #-0
1627    if (Val == INT32_MIN) Val = 0;
1628    if (Val < 0) Val = -Val;
1629    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1630    Inst.addOperand(MCOperand::CreateReg(0));
1631    Inst.addOperand(MCOperand::CreateImm(Val));
1632  }
1633
1634  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1635    assert(N == 3 && "Invalid number of operands!");
1636    // If we have an immediate that's not a constant, treat it as a label
1637    // reference needing a fixup. If it is a constant, it's something else
1638    // and we reject it.
1639    if (isImm()) {
1640      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1641      Inst.addOperand(MCOperand::CreateReg(0));
1642      Inst.addOperand(MCOperand::CreateImm(0));
1643      return;
1644    }
1645
1646    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1647    if (!Memory.OffsetRegNum) {
1648      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1649      // Special case for #-0
1650      if (Val == INT32_MIN) Val = 0;
1651      if (Val < 0) Val = -Val;
1652      Val = ARM_AM::getAM3Opc(AddSub, Val);
1653    } else {
1654      // For register offset, we encode the shift type and negation flag
1655      // here.
1656      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1657    }
1658    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1659    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1660    Inst.addOperand(MCOperand::CreateImm(Val));
1661  }
1662
1663  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1664    assert(N == 2 && "Invalid number of operands!");
1665    if (Kind == k_PostIndexRegister) {
1666      int32_t Val =
1667        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1668      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1669      Inst.addOperand(MCOperand::CreateImm(Val));
1670      return;
1671    }
1672
1673    // Constant offset.
1674    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1675    int32_t Val = CE->getValue();
1676    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1677    // Special case for #-0
1678    if (Val == INT32_MIN) Val = 0;
1679    if (Val < 0) Val = -Val;
1680    Val = ARM_AM::getAM3Opc(AddSub, Val);
1681    Inst.addOperand(MCOperand::CreateReg(0));
1682    Inst.addOperand(MCOperand::CreateImm(Val));
1683  }
1684
1685  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1686    assert(N == 2 && "Invalid number of operands!");
1687    // If we have an immediate that's not a constant, treat it as a label
1688    // reference needing a fixup. If it is a constant, it's something else
1689    // and we reject it.
1690    if (isImm()) {
1691      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1692      Inst.addOperand(MCOperand::CreateImm(0));
1693      return;
1694    }
1695
1696    // The lower two bits are always zero and as such are not encoded.
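    // e.g. an offset of #-20 becomes {sub, 5} here.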
1697    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1698    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1699    // Special case for #-0
1700    if (Val == INT32_MIN) Val = 0;
1701    if (Val < 0) Val = -Val;
1702    Val = ARM_AM::getAM5Opc(AddSub, Val);
1703    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1704    Inst.addOperand(MCOperand::CreateImm(Val));
1705  }
1706
1707  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1708    assert(N == 2 && "Invalid number of operands!");
1709    // If we have an immediate that's not a constant, treat it as a label
1710    // reference needing a fixup. If it is a constant, it's something else
1711    // and we reject it.
1712    if (isImm()) {
1713      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1714      Inst.addOperand(MCOperand::CreateImm(0));
1715      return;
1716    }
1717
1718    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1719    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1720    Inst.addOperand(MCOperand::CreateImm(Val));
1721  }
1722
1723  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1724    assert(N == 2 && "Invalid number of operands!");
1725    // The lower two bits are always zero and as such are not encoded.
1726    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1727    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1728    Inst.addOperand(MCOperand::CreateImm(Val));
1729  }
1730
1731  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1732    assert(N == 2 && "Invalid number of operands!");
1733    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1734    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1735    Inst.addOperand(MCOperand::CreateImm(Val));
1736  }
1737
1738  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1739    addMemImm8OffsetOperands(Inst, N);
1740  }
1741
1742  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1743    addMemImm8OffsetOperands(Inst, N);
1744  }
1745
1746  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1747    assert(N == 2 && "Invalid number of operands!");
1748    // If this is an immediate, it's a label reference.
1749    if (isImm()) {
1750      addExpr(Inst, getImm());
1751      Inst.addOperand(MCOperand::CreateImm(0));
1752      return;
1753    }
1754
1755    // Otherwise, it's a normal memory reg+offset.
1756    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1757    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1758    Inst.addOperand(MCOperand::CreateImm(Val));
1759  }
1760
1761  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1762    assert(N == 2 && "Invalid number of operands!");
1763    // If this is an immediate, it's a label reference.
1764    if (isImm()) {
1765      addExpr(Inst, getImm());
1766      Inst.addOperand(MCOperand::CreateImm(0));
1767      return;
1768    }
1769
1770    // Otherwise, it's a normal memory reg+offset.
1771    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1772    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1773    Inst.addOperand(MCOperand::CreateImm(Val));
1774  }
1775
1776  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1777    assert(N == 2 && "Invalid number of operands!");
1778    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1779    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1780  }
1781
1782  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1783    assert(N == 2 && "Invalid number of operands!");
1784    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1785    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1786  }
1787
1788  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1789    assert(N == 3 && "Invalid number of operands!");
1790    unsigned Val =
1791      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1792                        Memory.ShiftImm, Memory.ShiftType);
1793    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1794    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1795    Inst.addOperand(MCOperand::CreateImm(Val));
1796  }
1797
1798  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1799    assert(N == 3 && "Invalid number of operands!");
1800    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1801    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1802    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1803  }
1804
1805  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1806    assert(N == 2 && "Invalid number of operands!");
1807    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1808    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1809  }
1810
1811  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1812    assert(N == 2 && "Invalid number of operands!");
1813    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1814    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1815    Inst.addOperand(MCOperand::CreateImm(Val));
1816  }
1817
1818  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1819    assert(N == 2 && "Invalid number of operands!");
1820    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1821    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1822    Inst.addOperand(MCOperand::CreateImm(Val));
1823  }
1824
1825  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1826    assert(N == 2 && "Invalid number of operands!");
1827    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1828    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1829    Inst.addOperand(MCOperand::CreateImm(Val));
1830  }
1831
1832  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1833    assert(N == 2 && "Invalid number of operands!");
1834    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1835    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1836    Inst.addOperand(MCOperand::CreateImm(Val));
1837  }
1838
1839  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1840    assert(N == 1 && "Invalid number of operands!");
1841    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1842    assert(CE && "non-constant post-idx-imm8 operand!");
1843    int Imm = CE->getValue();
1844    bool isAdd = Imm >= 0;
1845    if (Imm == INT32_MIN) Imm = 0;
1846    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1847    Inst.addOperand(MCOperand::CreateImm(Imm));
1848  }
1849
1850  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1851    assert(N == 1 && "Invalid number of operands!");
1852    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1853    assert(CE && "non-constant post-idx-imm8s4 operand!");
1854    int Imm = CE->getValue();
1855    bool isAdd = Imm >= 0;
1856    if (Imm == INT32_MIN) Imm = 0;
1857    // Immediate is scaled by 4.
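    // e.g. #-16 is stored as (16 / 4) | (0 << 8) == 4, and #16 as 4 | (1 << 8) == 0x104.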
1858    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1859    Inst.addOperand(MCOperand::CreateImm(Imm));
1860  }
1861
1862  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1863    assert(N == 2 && "Invalid number of operands!");
1864    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1865    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1866  }
1867
1868  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1869    assert(N == 2 && "Invalid number of operands!");
1870    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1871    // The sign, shift type, and shift amount are encoded in a single operand
1872    // using the AM2 encoding helpers.
1873    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1874    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1875                                     PostIdxReg.ShiftTy);
1876    Inst.addOperand(MCOperand::CreateImm(Imm));
1877  }
1878
1879  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1880    assert(N == 1 && "Invalid number of operands!");
1881    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1882  }
1883
1884  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1885    assert(N == 1 && "Invalid number of operands!");
1886    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1887  }
1888
1889  void addVecListOperands(MCInst &Inst, unsigned N) const {
1890    assert(N == 1 && "Invalid number of operands!");
1891    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1892  }
1893
1894  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1895    assert(N == 2 && "Invalid number of operands!");
1896    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1897    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1898  }
1899
1900  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1901    assert(N == 1 && "Invalid number of operands!");
1902    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1903  }
1904
1905  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1906    assert(N == 1 && "Invalid number of operands!");
1907    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1908  }
1909
1910  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1911    assert(N == 1 && "Invalid number of operands!");
1912    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1913  }
1914
1915  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1916    assert(N == 1 && "Invalid number of operands!");
1917    // The immediate encodes the type of constant as well as the value.
1918    // Mask in that this is an i8 splat.
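    // e.g. a splat value of 0x42 is stored as 0x42 | 0xe00 == 0xe42.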
1919    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1920    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1921  }
1922
1923  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1924    assert(N == 1 && "Invalid number of operands!");
1925    // The immediate encodes the type of constant as well as the value.
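    // e.g. 0x1200 is stored as 0xa12 and 0x0034 as 0x834.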
1926    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1927    unsigned Value = CE->getValue();
1928    if (Value >= 256)
1929      Value = (Value >> 8) | 0xa00;
1930    else
1931      Value |= 0x800;
1932    Inst.addOperand(MCOperand::CreateImm(Value));
1933  }
1934
1935  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1936    assert(N == 1 && "Invalid number of operands!");
1937    // The immediate encodes the type of constant as well as the value.
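    // e.g. 0x00120000 is stored as (0x12 | 0x400) == 0x412.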
1938    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1939    unsigned Value = CE->getValue();
1940    if (Value >= 256 && Value <= 0xff00)
1941      Value = (Value >> 8) | 0x200;
1942    else if (Value > 0xffff && Value <= 0xff0000)
1943      Value = (Value >> 16) | 0x400;
1944    else if (Value > 0xffffff)
1945      Value = (Value >> 24) | 0x600;
1946    Inst.addOperand(MCOperand::CreateImm(Value));
1947  }
1948
1949  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1950    assert(N == 1 && "Invalid number of operands!");
1951    // The immediate encodes the type of constant as well as the value.
1952    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1953    unsigned Value = CE->getValue();
1954    if (Value >= 256 && Value <= 0xffff)
1955      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1956    else if (Value > 0xffff && Value <= 0xffffff)
1957      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1958    else if (Value > 0xffffff)
1959      Value = (Value >> 24) | 0x600;
1960    Inst.addOperand(MCOperand::CreateImm(Value));
1961  }
1962
1963  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1964    assert(N == 1 && "Invalid number of operands!");
1965    // The immediate encodes the type of constant as well as the value.
1966    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1967    unsigned Value = ~CE->getValue();
1968    if (Value >= 256 && Value <= 0xffff)
1969      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1970    else if (Value > 0xffff && Value <= 0xffffff)
1971      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1972    else if (Value > 0xffffff)
1973      Value = (Value >> 24) | 0x600;
1974    Inst.addOperand(MCOperand::CreateImm(Value));
1975  }
1976
1977  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1978    assert(N == 1 && "Invalid number of operands!");
1979    // The immediate encodes the type of constant as well as the value.
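    // Bit i of the encoded immediate is taken from bit 0 of byte i of the
    // value; e.g. 0x00ff00ff00ff00ff produces 0x55, emitted as 0x1e55.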
1980    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1981    uint64_t Value = CE->getValue();
1982    unsigned Imm = 0;
1983    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1984      Imm |= (Value & 1) << i;
1985    }
1986    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1987  }
1988
1989  virtual void print(raw_ostream &OS) const;
1990
1991  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1992    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1993    Op->ITMask.Mask = Mask;
1994    Op->StartLoc = S;
1995    Op->EndLoc = S;
1996    return Op;
1997  }
1998
1999  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2000    ARMOperand *Op = new ARMOperand(k_CondCode);
2001    Op->CC.Val = CC;
2002    Op->StartLoc = S;
2003    Op->EndLoc = S;
2004    return Op;
2005  }
2006
2007  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2008    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2009    Op->Cop.Val = CopVal;
2010    Op->StartLoc = S;
2011    Op->EndLoc = S;
2012    return Op;
2013  }
2014
2015  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2016    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2017    Op->Cop.Val = CopVal;
2018    Op->StartLoc = S;
2019    Op->EndLoc = S;
2020    return Op;
2021  }
2022
2023  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2024    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2025    Op->Cop.Val = Val;
2026    Op->StartLoc = S;
2027    Op->EndLoc = E;
2028    return Op;
2029  }
2030
2031  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2032    ARMOperand *Op = new ARMOperand(k_CCOut);
2033    Op->Reg.RegNum = RegNum;
2034    Op->StartLoc = S;
2035    Op->EndLoc = S;
2036    return Op;
2037  }
2038
2039  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2040    ARMOperand *Op = new ARMOperand(k_Token);
2041    Op->Tok.Data = Str.data();
2042    Op->Tok.Length = Str.size();
2043    Op->StartLoc = S;
2044    Op->EndLoc = S;
2045    return Op;
2046  }
2047
2048  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2049    ARMOperand *Op = new ARMOperand(k_Register);
2050    Op->Reg.RegNum = RegNum;
2051    Op->StartLoc = S;
2052    Op->EndLoc = E;
2053    return Op;
2054  }
2055
2056  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2057                                           unsigned SrcReg,
2058                                           unsigned ShiftReg,
2059                                           unsigned ShiftImm,
2060                                           SMLoc S, SMLoc E) {
2061    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2062    Op->RegShiftedReg.ShiftTy = ShTy;
2063    Op->RegShiftedReg.SrcReg = SrcReg;
2064    Op->RegShiftedReg.ShiftReg = ShiftReg;
2065    Op->RegShiftedReg.ShiftImm = ShiftImm;
2066    Op->StartLoc = S;
2067    Op->EndLoc = E;
2068    return Op;
2069  }
2070
2071  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2072                                            unsigned SrcReg,
2073                                            unsigned ShiftImm,
2074                                            SMLoc S, SMLoc E) {
2075    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2076    Op->RegShiftedImm.ShiftTy = ShTy;
2077    Op->RegShiftedImm.SrcReg = SrcReg;
2078    Op->RegShiftedImm.ShiftImm = ShiftImm;
2079    Op->StartLoc = S;
2080    Op->EndLoc = E;
2081    return Op;
2082  }
2083
2084  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2085                                   SMLoc S, SMLoc E) {
2086    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2087    Op->ShifterImm.isASR = isASR;
2088    Op->ShifterImm.Imm = Imm;
2089    Op->StartLoc = S;
2090    Op->EndLoc = E;
2091    return Op;
2092  }
2093
2094  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2095    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2096    Op->RotImm.Imm = Imm;
2097    Op->StartLoc = S;
2098    Op->EndLoc = E;
2099    return Op;
2100  }
2101
2102  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2103                                    SMLoc S, SMLoc E) {
2104    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2105    Op->Bitfield.LSB = LSB;
2106    Op->Bitfield.Width = Width;
2107    Op->StartLoc = S;
2108    Op->EndLoc = E;
2109    return Op;
2110  }
2111
2112  static ARMOperand *
2113  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2114                SMLoc StartLoc, SMLoc EndLoc) {
2115    KindTy Kind = k_RegisterList;
2116
2117    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2118      Kind = k_DPRRegisterList;
2119    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2120             contains(Regs.front().first))
2121      Kind = k_SPRRegisterList;
2122
2123    ARMOperand *Op = new ARMOperand(Kind);
2124    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2125           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2126      Op->Registers.push_back(I->first);
2127    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2128    Op->StartLoc = StartLoc;
2129    Op->EndLoc = EndLoc;
2130    return Op;
2131  }
2132
2133  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2134                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2135    ARMOperand *Op = new ARMOperand(k_VectorList);
2136    Op->VectorList.RegNum = RegNum;
2137    Op->VectorList.Count = Count;
2138    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2139    Op->StartLoc = S;
2140    Op->EndLoc = E;
2141    return Op;
2142  }
2143
2144  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2145                                              bool isDoubleSpaced,
2146                                              SMLoc S, SMLoc E) {
2147    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2148    Op->VectorList.RegNum = RegNum;
2149    Op->VectorList.Count = Count;
2150    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2151    Op->StartLoc = S;
2152    Op->EndLoc = E;
2153    return Op;
2154  }
2155
2156  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2157                                             unsigned Index,
2158                                             bool isDoubleSpaced,
2159                                             SMLoc S, SMLoc E) {
2160    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2161    Op->VectorList.RegNum = RegNum;
2162    Op->VectorList.Count = Count;
2163    Op->VectorList.LaneIndex = Index;
2164    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2165    Op->StartLoc = S;
2166    Op->EndLoc = E;
2167    return Op;
2168  }
2169
2170  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2171                                       MCContext &Ctx) {
2172    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2173    Op->VectorIndex.Val = Idx;
2174    Op->StartLoc = S;
2175    Op->EndLoc = E;
2176    return Op;
2177  }
2178
2179  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2180    ARMOperand *Op = new ARMOperand(k_Immediate);
2181    Op->Imm.Val = Val;
2182    Op->StartLoc = S;
2183    Op->EndLoc = E;
2184    return Op;
2185  }
2186
2187  static ARMOperand *CreateMem(unsigned BaseRegNum,
2188                               const MCConstantExpr *OffsetImm,
2189                               unsigned OffsetRegNum,
2190                               ARM_AM::ShiftOpc ShiftType,
2191                               unsigned ShiftImm,
2192                               unsigned Alignment,
2193                               bool isNegative,
2194                               SMLoc S, SMLoc E) {
2195    ARMOperand *Op = new ARMOperand(k_Memory);
2196    Op->Memory.BaseRegNum = BaseRegNum;
2197    Op->Memory.OffsetImm = OffsetImm;
2198    Op->Memory.OffsetRegNum = OffsetRegNum;
2199    Op->Memory.ShiftType = ShiftType;
2200    Op->Memory.ShiftImm = ShiftImm;
2201    Op->Memory.Alignment = Alignment;
2202    Op->Memory.isNegative = isNegative;
2203    Op->StartLoc = S;
2204    Op->EndLoc = E;
2205    return Op;
2206  }
2207
2208  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2209                                      ARM_AM::ShiftOpc ShiftTy,
2210                                      unsigned ShiftImm,
2211                                      SMLoc S, SMLoc E) {
2212    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2213    Op->PostIdxReg.RegNum = RegNum;
2214    Op->PostIdxReg.isAdd = isAdd;
2215    Op->PostIdxReg.ShiftTy = ShiftTy;
2216    Op->PostIdxReg.ShiftImm = ShiftImm;
2217    Op->StartLoc = S;
2218    Op->EndLoc = E;
2219    return Op;
2220  }
2221
2222  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2223    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2224    Op->MBOpt.Val = Opt;
2225    Op->StartLoc = S;
2226    Op->EndLoc = S;
2227    return Op;
2228  }
2229
2230  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2231    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2232    Op->IFlags.Val = IFlags;
2233    Op->StartLoc = S;
2234    Op->EndLoc = S;
2235    return Op;
2236  }
2237
2238  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2239    ARMOperand *Op = new ARMOperand(k_MSRMask);
2240    Op->MMask.Val = MMask;
2241    Op->StartLoc = S;
2242    Op->EndLoc = S;
2243    return Op;
2244  }
2245};
2246
2247} // end anonymous namespace.
2248
2249void ARMOperand::print(raw_ostream &OS) const {
2250  switch (Kind) {
2251  case k_CondCode:
2252    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2253    break;
2254  case k_CCOut:
2255    OS << "<ccout " << getReg() << ">";
2256    break;
2257  case k_ITCondMask: {
2258    static const char *MaskStr[] = {
2259      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2260      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2261    };
2262    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2263    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2264    break;
2265  }
2266  case k_CoprocNum:
2267    OS << "<coprocessor number: " << getCoproc() << ">";
2268    break;
2269  case k_CoprocReg:
2270    OS << "<coprocessor register: " << getCoproc() << ">";
2271    break;
2272  case k_CoprocOption:
2273    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2274    break;
2275  case k_MSRMask:
2276    OS << "<mask: " << getMSRMask() << ">";
2277    break;
2278  case k_Immediate:
2279    getImm()->print(OS);
2280    break;
2281  case k_MemBarrierOpt:
2282    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2283    break;
2284  case k_Memory:
2285    OS << "<memory "
2286       << " base:" << Memory.BaseRegNum;
2287    OS << ">";
2288    break;
2289  case k_PostIndexRegister:
2290    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2291       << PostIdxReg.RegNum;
2292    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2293      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2294         << PostIdxReg.ShiftImm;
2295    OS << ">";
2296    break;
2297  case k_ProcIFlags: {
2298    OS << "<ARM_PROC::";
2299    unsigned IFlags = getProcIFlags();
2300    for (int i=2; i >= 0; --i)
2301      if (IFlags & (1 << i))
2302        OS << ARM_PROC::IFlagsToString(1 << i);
2303    OS << ">";
2304    break;
2305  }
2306  case k_Register:
2307    OS << "<register " << getReg() << ">";
2308    break;
2309  case k_ShifterImmediate:
2310    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2311       << " #" << ShifterImm.Imm << ">";
2312    break;
2313  case k_ShiftedRegister:
2314    OS << "<so_reg_reg "
2315       << RegShiftedReg.SrcReg << " "
2316       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2317       << " " << RegShiftedReg.ShiftReg << ">";
2318    break;
2319  case k_ShiftedImmediate:
2320    OS << "<so_reg_imm "
2321       << RegShiftedImm.SrcReg << " "
2322       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2323       << " #" << RegShiftedImm.ShiftImm << ">";
2324    break;
2325  case k_RotateImmediate:
2326    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2327    break;
2328  case k_BitfieldDescriptor:
2329    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2330       << ", width: " << Bitfield.Width << ">";
2331    break;
2332  case k_RegisterList:
2333  case k_DPRRegisterList:
2334  case k_SPRRegisterList: {
2335    OS << "<register_list ";
2336
2337    const SmallVectorImpl<unsigned> &RegList = getRegList();
2338    for (SmallVectorImpl<unsigned>::const_iterator
2339           I = RegList.begin(), E = RegList.end(); I != E; ) {
2340      OS << *I;
2341      if (++I < E) OS << ", ";
2342    }
2343
2344    OS << ">";
2345    break;
2346  }
2347  case k_VectorList:
2348    OS << "<vector_list " << VectorList.Count << " * "
2349       << VectorList.RegNum << ">";
2350    break;
2351  case k_VectorListAllLanes:
2352    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2353       << VectorList.RegNum << ">";
2354    break;
2355  case k_VectorListIndexed:
2356    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2357       << VectorList.Count << " * " << VectorList.RegNum << ">";
2358    break;
2359  case k_Token:
2360    OS << "'" << getToken() << "'";
2361    break;
2362  case k_VectorIndex:
2363    OS << "<vectorindex " << getVectorIndex() << ">";
2364    break;
2365  }
2366}
2367
2368/// @name Auto-generated Match Functions
2369/// {
2370
2371static unsigned MatchRegisterName(StringRef Name);
2372
2373/// }
2374
2375bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2376                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2377  StartLoc = Parser.getTok().getLoc();
2378  RegNo = tryParseRegister();
2379  EndLoc = Parser.getTok().getLoc();
2380
2381  return (RegNo == (unsigned)-1);
2382}
2383
2384/// Try to parse a register name.  The token must be an Identifier when called,
2385/// and if it is a register name the token is eaten and the register number is
2386/// returned.  Otherwise return -1.
2387///
2388int ARMAsmParser::tryParseRegister() {
2389  const AsmToken &Tok = Parser.getTok();
2390  if (Tok.isNot(AsmToken::Identifier)) return -1;
2391
2392  std::string lowerCase = Tok.getString().lower();
2393  unsigned RegNum = MatchRegisterName(lowerCase);
2394  if (!RegNum) {
2395    RegNum = StringSwitch<unsigned>(lowerCase)
2396      .Case("r13", ARM::SP)
2397      .Case("r14", ARM::LR)
2398      .Case("r15", ARM::PC)
2399      .Case("ip", ARM::R12)
2400      // Additional register name aliases for 'gas' compatibility.
2401      .Case("a1", ARM::R0)
2402      .Case("a2", ARM::R1)
2403      .Case("a3", ARM::R2)
2404      .Case("a4", ARM::R3)
2405      .Case("v1", ARM::R4)
2406      .Case("v2", ARM::R5)
2407      .Case("v3", ARM::R6)
2408      .Case("v4", ARM::R7)
2409      .Case("v5", ARM::R8)
2410      .Case("v6", ARM::R9)
2411      .Case("v7", ARM::R10)
2412      .Case("v8", ARM::R11)
2413      .Case("sb", ARM::R9)
2414      .Case("sl", ARM::R10)
2415      .Case("fp", ARM::R11)
2416      .Default(0);
2417  }
2418  if (!RegNum) {
2419    // Check for aliases registered via .req. Canonicalize to lower case.
2420    // That's more consistent since register names are case insensitive, and
2421    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2422    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2423    // If no match, return failure.
2424    if (Entry == RegisterReqs.end())
2425      return -1;
2426    Parser.Lex(); // Eat identifier token.
2427    return Entry->getValue();
2428  }
2429
2430  Parser.Lex(); // Eat identifier token.
2431
2432  return RegNum;
2433}
2434
2435// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2436// If a recoverable error occurs, return 1. If an irrecoverable error
2437// occurs, return -1. An irrecoverable error is one where tokens have been
2438// consumed in the process of trying to parse the shifter (i.e., when it is
2439// indeed a shifter operand, but malformed).
2440int ARMAsmParser::tryParseShiftRegister(
2441                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2442  SMLoc S = Parser.getTok().getLoc();
2443  const AsmToken &Tok = Parser.getTok();
2444  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2445
2446  std::string lowerCase = Tok.getString().lower();
2447  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2448      .Case("asl", ARM_AM::lsl)
2449      .Case("lsl", ARM_AM::lsl)
2450      .Case("lsr", ARM_AM::lsr)
2451      .Case("asr", ARM_AM::asr)
2452      .Case("ror", ARM_AM::ror)
2453      .Case("rrx", ARM_AM::rrx)
2454      .Default(ARM_AM::no_shift);
2455
2456  if (ShiftTy == ARM_AM::no_shift)
2457    return 1;
2458
2459  Parser.Lex(); // Eat the operator.
2460
2461  // The source register for the shift has already been added to the
2462  // operand list, so we need to pop it off and combine it into the shifted
2463  // register operand instead.
2464  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2465  if (!PrevOp->isReg())
2466    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2467  int SrcReg = PrevOp->getReg();
2468  int64_t Imm = 0;
2469  int ShiftReg = 0;
2470  if (ShiftTy == ARM_AM::rrx) {
2471    // RRX Doesn't have an explicit shift amount. The encoder expects
2472    // the shift register to be the same as the source register. Seems odd,
2473    // but OK.
2474    ShiftReg = SrcReg;
2475  } else {
2476    // Figure out if this is shifted by a constant or a register (for non-RRX).
2477    if (Parser.getTok().is(AsmToken::Hash) ||
2478        Parser.getTok().is(AsmToken::Dollar)) {
2479      Parser.Lex(); // Eat hash.
2480      SMLoc ImmLoc = Parser.getTok().getLoc();
2481      const MCExpr *ShiftExpr = 0;
2482      if (getParser().ParseExpression(ShiftExpr)) {
2483        Error(ImmLoc, "invalid immediate shift value");
2484        return -1;
2485      }
2486      // The expression must be evaluatable as an immediate.
2487      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2488      if (!CE) {
2489        Error(ImmLoc, "invalid immediate shift value");
2490        return -1;
2491      }
2492      // Range check the immediate.
2493      // lsl, ror: 0 <= imm <= 31
2494      // lsr, asr: 0 <= imm <= 32
2495      Imm = CE->getValue();
2496      if (Imm < 0 ||
2497          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2498          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2499        Error(ImmLoc, "immediate shift value out of range");
2500        return -1;
2501      }
2502      // A shift by zero is a nop. Always send it through as lsl.
2503      // ('as' compatibility)
2504      if (Imm == 0)
2505        ShiftTy = ARM_AM::lsl;
2506    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2507      ShiftReg = tryParseRegister();
2508      SMLoc L = Parser.getTok().getLoc();
2509      if (ShiftReg == -1) {
2510        Error(L, "expected immediate or register in shift operand");
2511        return -1;
2512      }
2513    } else {
2514      Error(Parser.getTok().getLoc(),
2515            "expected immediate or register in shift operand");
2516      return -1;
2517    }
2518  }
2519
2520  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2521    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2522                                                         ShiftReg, Imm,
2523                                               S, Parser.getTok().getLoc()));
2524  else
2525    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2526                                               S, Parser.getTok().getLoc()));
2527
2528  return 0;
2529}
2530
2531
2532/// Try to parse a register name.  The token must be an Identifier when called.
2533/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2534/// if there is a "writeback". Returns 'true' if it's not a register.
2535///
2536/// TODO this is likely to change to allow different register types and/or to
2537/// parse for a specific register type.
2538bool ARMAsmParser::
2539tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2540  SMLoc S = Parser.getTok().getLoc();
2541  int RegNo = tryParseRegister();
2542  if (RegNo == -1)
2543    return true;
2544
2545  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2546
2547  const AsmToken &ExclaimTok = Parser.getTok();
2548  if (ExclaimTok.is(AsmToken::Exclaim)) {
2549    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2550                                               ExclaimTok.getLoc()));
2551    Parser.Lex(); // Eat exclaim token
2552    return false;
2553  }
2554
2555  // Also check for an index operand. This is only legal for vector registers,
2556  // but that'll get caught OK in operand matching, so we don't need to
2557  // explicitly filter everything else out here.
2558  if (Parser.getTok().is(AsmToken::LBrac)) {
2559    SMLoc SIdx = Parser.getTok().getLoc();
2560    Parser.Lex(); // Eat left bracket token.
2561
2562    const MCExpr *ImmVal;
2563    if (getParser().ParseExpression(ImmVal))
2564      return true;
2565    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2566    if (!MCE)
2567      return TokError("immediate value expected for vector index");
2568
2569    SMLoc E = Parser.getTok().getLoc();
2570    if (Parser.getTok().isNot(AsmToken::RBrac))
2571      return Error(E, "']' expected");
2572
2573    Parser.Lex(); // Eat right bracket token.
2574
2575    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2576                                                     SIdx, E,
2577                                                     getContext()));
2578  }
2579
2580  return false;
2581}
2582
2583/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2584/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2585/// "c5", ...
2586static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2587  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2588  // but efficient.
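  // e.g. ("p15", 'p') returns 15; ("c3", 'p') returns -1.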
2589  switch (Name.size()) {
2590  default: return -1;
2591  case 2:
2592    if (Name[0] != CoprocOp)
2593      return -1;
2594    switch (Name[1]) {
2595    default:  return -1;
2596    case '0': return 0;
2597    case '1': return 1;
2598    case '2': return 2;
2599    case '3': return 3;
2600    case '4': return 4;
2601    case '5': return 5;
2602    case '6': return 6;
2603    case '7': return 7;
2604    case '8': return 8;
2605    case '9': return 9;
2606    }
2607  case 3:
2608    if (Name[0] != CoprocOp || Name[1] != '1')
2609      return -1;
2610    switch (Name[2]) {
2611    default:  return -1;
2612    case '0': return 10;
2613    case '1': return 11;
2614    case '2': return 12;
2615    case '3': return 13;
2616    case '4': return 14;
2617    case '5': return 15;
2618    }
2619  }
2620}
2621
2622/// parseITCondCode - Try to parse a condition code for an IT instruction.
2623ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2624parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2625  SMLoc S = Parser.getTok().getLoc();
2626  const AsmToken &Tok = Parser.getTok();
2627  if (!Tok.is(AsmToken::Identifier))
2628    return MatchOperand_NoMatch;
2629  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2630    .Case("eq", ARMCC::EQ)
2631    .Case("ne", ARMCC::NE)
2632    .Case("hs", ARMCC::HS)
2633    .Case("cs", ARMCC::HS)
2634    .Case("lo", ARMCC::LO)
2635    .Case("cc", ARMCC::LO)
2636    .Case("mi", ARMCC::MI)
2637    .Case("pl", ARMCC::PL)
2638    .Case("vs", ARMCC::VS)
2639    .Case("vc", ARMCC::VC)
2640    .Case("hi", ARMCC::HI)
2641    .Case("ls", ARMCC::LS)
2642    .Case("ge", ARMCC::GE)
2643    .Case("lt", ARMCC::LT)
2644    .Case("gt", ARMCC::GT)
2645    .Case("le", ARMCC::LE)
2646    .Case("al", ARMCC::AL)
2647    .Default(~0U);
2648  if (CC == ~0U)
2649    return MatchOperand_NoMatch;
2650  Parser.Lex(); // Eat the token.
2651
2652  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2653
2654  return MatchOperand_Success;
2655}
2656
2657/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2658/// token must be an Identifier when called, and if it is a coprocessor
2659/// number, the token is eaten and the operand is added to the operand list.
2660ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2661parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2662  SMLoc S = Parser.getTok().getLoc();
2663  const AsmToken &Tok = Parser.getTok();
2664  if (Tok.isNot(AsmToken::Identifier))
2665    return MatchOperand_NoMatch;
2666
2667  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2668  if (Num == -1)
2669    return MatchOperand_NoMatch;
2670
2671  Parser.Lex(); // Eat identifier token.
2672  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2673  return MatchOperand_Success;
2674}
2675
2676/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2677/// token must be an Identifier when called, and if it is a coprocessor
2678/// register, the token is eaten and the operand is added to the operand list.
2679ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2680parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2681  SMLoc S = Parser.getTok().getLoc();
2682  const AsmToken &Tok = Parser.getTok();
2683  if (Tok.isNot(AsmToken::Identifier))
2684    return MatchOperand_NoMatch;
2685
2686  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2687  if (Reg == -1)
2688    return MatchOperand_NoMatch;
2689
2690  Parser.Lex(); // Eat identifier token.
2691  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2692  return MatchOperand_Success;
2693}
2694
2695/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2696/// coproc_option : '{' imm0_255 '}'
2697ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2698parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2699  SMLoc S = Parser.getTok().getLoc();
2700
2701  // If this isn't a '{', this isn't a coprocessor immediate operand.
2702  if (Parser.getTok().isNot(AsmToken::LCurly))
2703    return MatchOperand_NoMatch;
2704  Parser.Lex(); // Eat the '{'
2705
2706  const MCExpr *Expr;
2707  SMLoc Loc = Parser.getTok().getLoc();
2708  if (getParser().ParseExpression(Expr)) {
2709    Error(Loc, "illegal expression");
2710    return MatchOperand_ParseFail;
2711  }
2712  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2713  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2714    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2715    return MatchOperand_ParseFail;
2716  }
2717  int Val = CE->getValue();
2718
2719  // Check for and consume the closing '}'
2720  if (Parser.getTok().isNot(AsmToken::RCurly))
2721    return MatchOperand_ParseFail;
2722  SMLoc E = Parser.getTok().getLoc();
2723  Parser.Lex(); // Eat the '}'
2724
2725  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2726  return MatchOperand_Success;
2727}
2728
2729// For register list parsing, we need to map from raw GPR register numbering
2730// to the enumeration values. The enumeration values aren't sorted by
2731// register number due to our using "sp", "lr" and "pc" as canonical names.
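// e.g. walking the range {r10-sp} must visit r10, r11, r12, sp even though
// ARM::SP does not immediately follow ARM::R12 in the generated enum.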
2732static unsigned getNextRegister(unsigned Reg) {
2733  // If this is a GPR, we need to do it manually, otherwise we can rely
2734  // on the sort ordering of the enumeration since the other reg-classes
2735  // are sane.
2736  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2737    return Reg + 1;
2738  switch(Reg) {
2739  default: llvm_unreachable("Invalid GPR number!");
2740  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2741  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2742  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2743  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2744  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2745  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2746  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2747  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2748  }
2749}
2750
2751// Return the low-subreg of a given Q register.
2752static unsigned getDRegFromQReg(unsigned QReg) {
2753  switch (QReg) {
2754  default: llvm_unreachable("expected a Q register!");
2755  case ARM::Q0:  return ARM::D0;
2756  case ARM::Q1:  return ARM::D2;
2757  case ARM::Q2:  return ARM::D4;
2758  case ARM::Q3:  return ARM::D6;
2759  case ARM::Q4:  return ARM::D8;
2760  case ARM::Q5:  return ARM::D10;
2761  case ARM::Q6:  return ARM::D12;
2762  case ARM::Q7:  return ARM::D14;
2763  case ARM::Q8:  return ARM::D16;
2764  case ARM::Q9:  return ARM::D18;
2765  case ARM::Q10: return ARM::D20;
2766  case ARM::Q11: return ARM::D22;
2767  case ARM::Q12: return ARM::D24;
2768  case ARM::Q13: return ARM::D26;
2769  case ARM::Q14: return ARM::D28;
2770  case ARM::Q15: return ARM::D30;
2771  }
2772}
2773
2774/// Parse a register list.
2775bool ARMAsmParser::
2776parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2777  assert(Parser.getTok().is(AsmToken::LCurly) &&
2778         "Token is not a Left Curly Brace");
2779  SMLoc S = Parser.getTok().getLoc();
2780  Parser.Lex(); // Eat '{' token.
2781  SMLoc RegLoc = Parser.getTok().getLoc();
2782
2783  // Check the first register in the list to see what register class
2784  // this is a list of.
2785  int Reg = tryParseRegister();
2786  if (Reg == -1)
2787    return Error(RegLoc, "register expected");
2788
2789  // The reglist instructions have at most 16 registers, so reserve
2790  // space for that many.
2791  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2792
2793  // Allow Q regs and just interpret them as the two D sub-registers.
2794  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2795    Reg = getDRegFromQReg(Reg);
2796    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2797    ++Reg;
2798  }
2799  const MCRegisterClass *RC;
2800  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2801    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2802  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2803    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2804  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2805    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2806  else
2807    return Error(RegLoc, "invalid register in register list");
2808
2809  // Store the register.
2810  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2811
2812  // This starts immediately after the first register token in the list,
2813  // so we can see either a comma or a minus (range separator) as a legal
2814  // next token.
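  // e.g. for '{r0, r4-r6, pc}' each iteration of this loop consumes either one
  // ','-separated register or one '-' range bound.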
2815  while (Parser.getTok().is(AsmToken::Comma) ||
2816         Parser.getTok().is(AsmToken::Minus)) {
2817    if (Parser.getTok().is(AsmToken::Minus)) {
2818      Parser.Lex(); // Eat the minus.
2819      SMLoc EndLoc = Parser.getTok().getLoc();
2820      int EndReg = tryParseRegister();
2821      if (EndReg == -1)
2822        return Error(EndLoc, "register expected");
2823      // Allow Q regs and just interpret them as the two D sub-registers.
2824      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2825        EndReg = getDRegFromQReg(EndReg) + 1;
2826      // If the register is the same as the start reg, there's nothing
2827      // more to do.
2828      if (Reg == EndReg)
2829        continue;
2830      // The register must be in the same register class as the first.
2831      if (!RC->contains(EndReg))
2832        return Error(EndLoc, "invalid register in register list");
2833      // Ranges must go from low to high.
2834      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2835        return Error(EndLoc, "bad range in register list");
2836
2837      // Add all the registers in the range to the register list.
2838      while (Reg != EndReg) {
2839        Reg = getNextRegister(Reg);
2840        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2841      }
2842      continue;
2843    }
2844    Parser.Lex(); // Eat the comma.
2845    RegLoc = Parser.getTok().getLoc();
2846    int OldReg = Reg;
2847    const AsmToken RegTok = Parser.getTok();
2848    Reg = tryParseRegister();
2849    if (Reg == -1)
2850      return Error(RegLoc, "register expected");
2851    // Allow Q regs and just interpret them as the two D sub-registers.
2852    bool isQReg = false;
2853    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2854      Reg = getDRegFromQReg(Reg);
2855      isQReg = true;
2856    }
2857    // The register must be in the same register class as the first.
2858    if (!RC->contains(Reg))
2859      return Error(RegLoc, "invalid register in register list");
2860    // List must be monotonically increasing.
2861    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2862      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2863        Warning(RegLoc, "register list not in ascending order");
2864      else
2865        return Error(RegLoc, "register list not in ascending order");
2866    }
2867    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2868      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2869              ") in register list");
2870      continue;
2871    }
2872    // VFP register lists must also be contiguous.
2873    // It's OK to use the enumeration values directly here, as the
2874    // VFP register classes have the enum sorted properly.
2875    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2876        Reg != OldReg + 1)
2877      return Error(RegLoc, "non-contiguous register range");
2878    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2879    if (isQReg)
2880      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2881  }
2882
2883  SMLoc E = Parser.getTok().getLoc();
2884  if (Parser.getTok().isNot(AsmToken::RCurly))
2885    return Error(E, "'}' expected");
2886  Parser.Lex(); // Eat '}' token.
2887
2888  // Push the register list operand.
2889  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2890
2891  // The ARM system instruction variants for LDM/STM have a '^' token here.
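  // (e.g. the trailing '^' in "ldmfd sp!, {r0-r12, pc}^").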
2892  if (Parser.getTok().is(AsmToken::Caret)) {
2893    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2894    Parser.Lex(); // Eat '^' token.
2895  }
2896
2897  return false;
2898}
2899
2900// Helper function to parse the lane index for vector lists.
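// Accepts, e.g., the "[2]" in "d3[2]" (a single indexed lane) or the "[]" in
// "d3[]" (all lanes); when no '[' follows, LaneKind is set to NoLanes.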
2901ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2902parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2903  Index = 0; // Always return a defined index value.
2904  if (Parser.getTok().is(AsmToken::LBrac)) {
2905    Parser.Lex(); // Eat the '['.
2906    if (Parser.getTok().is(AsmToken::RBrac)) {
2907      // "Dn[]" is the 'all lanes' syntax.
2908      LaneKind = AllLanes;
2909      Parser.Lex(); // Eat the ']'.
2910      return MatchOperand_Success;
2911    }
2912
2913    // There's an optional '#' token here. Normally there wouldn't be, but
2914    // inline assembly puts one in, and it's friendly to accept that.
2915    if (Parser.getTok().is(AsmToken::Hash))
2916      Parser.Lex(); // Eat the '#'
2917
2918    const MCExpr *LaneIndex;
2919    SMLoc Loc = Parser.getTok().getLoc();
2920    if (getParser().ParseExpression(LaneIndex)) {
2921      Error(Loc, "illegal expression");
2922      return MatchOperand_ParseFail;
2923    }
2924    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2925    if (!CE) {
2926      Error(Loc, "lane index must be empty or an integer");
2927      return MatchOperand_ParseFail;
2928    }
2929    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2930      Error(Parser.getTok().getLoc(), "']' expected");
2931      return MatchOperand_ParseFail;
2932    }
2933    Parser.Lex(); // Eat the ']'.
2934    int64_t Val = CE->getValue();
2935
2936    // FIXME: Make this range check context sensitive for .8, .16, .32.
2937    if (Val < 0 || Val > 7) {
2938      Error(Parser.getTok().getLoc(), "lane index out of range");
2939      return MatchOperand_ParseFail;
2940    }
2941    Index = Val;
2942    LaneKind = IndexedLane;
2943    return MatchOperand_Success;
2944  }
2945  LaneKind = NoLanes;
2946  return MatchOperand_Success;
2947}
2948
2949// Parse a vector register list.
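// Accepts forms such as "{d0, d1, d2}", "{d0-d3}", "{q0, q1}", "{d0[], d1[]}"
// and "{d0[1], d1[1]}", i.e. single- or double-spaced lists with optional
// lane syntax.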
2950ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2951parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2952  VectorLaneTy LaneKind;
2953  unsigned LaneIndex;
2954  SMLoc S = Parser.getTok().getLoc();
2955  // As an extension (to match gas), support a plain D register or Q register
2956  // (without enclosing curly braces) as a single or double entry list,
2957  // respectively.
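  // For example, a bare "d2" is treated like "{d2}" and a bare "q1" like
  // "{d2, d3}".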
2958  if (Parser.getTok().is(AsmToken::Identifier)) {
2959    int Reg = tryParseRegister();
2960    if (Reg == -1)
2961      return MatchOperand_NoMatch;
2962    SMLoc E = Parser.getTok().getLoc();
2963    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2964      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2965      if (Res != MatchOperand_Success)
2966        return Res;
2967      switch (LaneKind) {
2968      case NoLanes:
2969        E = Parser.getTok().getLoc();
2970        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2971        break;
2972      case AllLanes:
2973        E = Parser.getTok().getLoc();
2974        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2975                                                                S, E));
2976        break;
2977      case IndexedLane:
2978        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2979                                                               LaneIndex,
2980                                                               false, S, E));
2981        break;
2982      }
2983      return MatchOperand_Success;
2984    }
2985    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2986      Reg = getDRegFromQReg(Reg);
2987      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2988      if (Res != MatchOperand_Success)
2989        return Res;
2990      switch (LaneKind) {
2991      case NoLanes:
2992        E = Parser.getTok().getLoc();
2993        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
2994                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
2995        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2996        break;
2997      case AllLanes:
2998        E = Parser.getTok().getLoc();
2999        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3000                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3001        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3002                                                                S, E));
3003        break;
3004      case IndexedLane:
3005        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3006                                                               LaneIndex,
3007                                                               false, S, E));
3008        break;
3009      }
3010      return MatchOperand_Success;
3011    }
3012    Error(S, "vector register expected");
3013    return MatchOperand_ParseFail;
3014  }
3015
3016  if (Parser.getTok().isNot(AsmToken::LCurly))
3017    return MatchOperand_NoMatch;
3018
3019  Parser.Lex(); // Eat '{' token.
3020  SMLoc RegLoc = Parser.getTok().getLoc();
3021
3022  int Reg = tryParseRegister();
3023  if (Reg == -1) {
3024    Error(RegLoc, "register expected");
3025    return MatchOperand_ParseFail;
3026  }
3027  unsigned Count = 1;
3028  int Spacing = 0;
3029  unsigned FirstReg = Reg;
3030  // The list is of D registers, but we also allow Q regs and just interpret
3031  // them as the two D sub-registers.
3032  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3033    FirstReg = Reg = getDRegFromQReg(Reg);
3034    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3035                 // it's ambiguous with four-register single spaced.
3036    ++Reg;
3037    ++Count;
3038  }
3039  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3040    return MatchOperand_ParseFail;
3041
3042  while (Parser.getTok().is(AsmToken::Comma) ||
3043         Parser.getTok().is(AsmToken::Minus)) {
3044    if (Parser.getTok().is(AsmToken::Minus)) {
3045      if (!Spacing)
3046        Spacing = 1; // Register range implies a single spaced list.
3047      else if (Spacing == 2) {
3048        Error(Parser.getTok().getLoc(),
3049              "sequential registers in double spaced list");
3050        return MatchOperand_ParseFail;
3051      }
3052      Parser.Lex(); // Eat the minus.
3053      SMLoc EndLoc = Parser.getTok().getLoc();
3054      int EndReg = tryParseRegister();
3055      if (EndReg == -1) {
3056        Error(EndLoc, "register expected");
3057        return MatchOperand_ParseFail;
3058      }
3059      // Allow Q regs and just interpret them as the two D sub-registers.
3060      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3061        EndReg = getDRegFromQReg(EndReg) + 1;
3062      // If the register is the same as the start reg, there's nothing
3063      // more to do.
3064      if (Reg == EndReg)
3065        continue;
3066      // The register must be in the same register class as the first.
3067      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3068        Error(EndLoc, "invalid register in register list");
3069        return MatchOperand_ParseFail;
3070      }
3071      // Ranges must go from low to high.
3072      if (Reg > EndReg) {
3073        Error(EndLoc, "bad range in register list");
3074        return MatchOperand_ParseFail;
3075      }
3076      // Parse the lane specifier if present.
3077      VectorLaneTy NextLaneKind;
3078      unsigned NextLaneIndex;
3079      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3080        return MatchOperand_ParseFail;
3081      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3082        Error(EndLoc, "mismatched lane index in register list");
3083        return MatchOperand_ParseFail;
3084      }
3085      EndLoc = Parser.getTok().getLoc();
3086
3087      // Add all the registers in the range to the register list.
3088      Count += EndReg - Reg;
3089      Reg = EndReg;
3090      continue;
3091    }
3092    Parser.Lex(); // Eat the comma.
3093    RegLoc = Parser.getTok().getLoc();
3094    int OldReg = Reg;
3095    Reg = tryParseRegister();
3096    if (Reg == -1) {
3097      Error(RegLoc, "register expected");
3098      return MatchOperand_ParseFail;
3099    }
3100    // Vector register lists must be contiguous.
3101    // It's OK to use the enumeration values directly here, as the VFP
3102    // register classes have their enum values sorted properly.
3103    //
3104    // The list is of D registers, but we also allow Q regs and just interpret
3105    // them as the two D sub-registers.
3106    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3107      if (!Spacing)
3108        Spacing = 1; // Register range implies a single spaced list.
3109      else if (Spacing == 2) {
3110        Error(RegLoc,
3111              "invalid register in double-spaced list (must be 'D' register)");
3112        return MatchOperand_ParseFail;
3113      }
3114      Reg = getDRegFromQReg(Reg);
3115      if (Reg != OldReg + 1) {
3116        Error(RegLoc, "non-contiguous register range");
3117        return MatchOperand_ParseFail;
3118      }
3119      ++Reg;
3120      Count += 2;
3121      // Parse the lane specifier if present.
3122      VectorLaneTy NextLaneKind;
3123      unsigned NextLaneIndex;
3124      SMLoc EndLoc = Parser.getTok().getLoc();
3125      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3126        return MatchOperand_ParseFail;
3127      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3128        Error(EndLoc, "mismatched lane index in register list");
3129        return MatchOperand_ParseFail;
3130      }
3131      continue;
3132    }
3133    // Normal D register.
3134    // Figure out the register spacing (single or double) of the list if
3135    // we don't know it already.
3136    if (!Spacing)
3137      Spacing = 1 + (Reg == OldReg + 2);
3138
3139    // Just check that it's contiguous and keep going.
3140    if (Reg != OldReg + Spacing) {
3141      Error(RegLoc, "non-contiguous register range");
3142      return MatchOperand_ParseFail;
3143    }
3144    ++Count;
3145    // Parse the lane specifier if present.
3146    VectorLaneTy NextLaneKind;
3147    unsigned NextLaneIndex;
3148    SMLoc EndLoc = Parser.getTok().getLoc();
3149    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3150      return MatchOperand_ParseFail;
3151    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3152      Error(EndLoc, "mismatched lane index in register list");
3153      return MatchOperand_ParseFail;
3154    }
3155  }
3156
3157  SMLoc E = Parser.getTok().getLoc();
3158  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3159    Error(E, "'}' expected");
3160    return MatchOperand_ParseFail;
3161  }
3162  Parser.Lex(); // Eat '}' token.
3163
3164  switch (LaneKind) {
3165  case NoLanes:
3166    // Two-register operands have been converted to the
3167    // composite register classes.
3168    if (Count == 2) {
3169      const MCRegisterClass *RC = (Spacing == 1) ?
3170        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3171        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3172      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3173    }
3174
3175    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3176                                                    (Spacing == 2), S, E));
3177    break;
3178  case AllLanes:
3179    // Two-register operands have been converted to the
3180    // composite register classes.
3181    if (Count == 2) {
3182      const MCRegisterClass *RC = (Spacing == 1) ?
3183        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3184        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3185      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3186    }
3187    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3188                                                            (Spacing == 2),
3189                                                            S, E));
3190    break;
3191  case IndexedLane:
3192    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3193                                                           LaneIndex,
3194                                                           (Spacing == 2),
3195                                                           S, E));
3196    break;
3197  }
3198  return MatchOperand_Success;
3199}
3200
3201/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
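/// Accepts, e.g., the "ish" in "dmb ish" or the "sy" in "dsb sy"; older
/// spellings such as "sh", "un" and "shst" are also recognized.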
3202ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3203parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3204  SMLoc S = Parser.getTok().getLoc();
3205  const AsmToken &Tok = Parser.getTok();
3206  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3207  StringRef OptStr = Tok.getString();
3208
3209  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3210    .Case("sy",    ARM_MB::SY)
3211    .Case("st",    ARM_MB::ST)
3212    .Case("sh",    ARM_MB::ISH)
3213    .Case("ish",   ARM_MB::ISH)
3214    .Case("shst",  ARM_MB::ISHST)
3215    .Case("ishst", ARM_MB::ISHST)
3216    .Case("nsh",   ARM_MB::NSH)
3217    .Case("un",    ARM_MB::NSH)
3218    .Case("nshst", ARM_MB::NSHST)
3219    .Case("unst",  ARM_MB::NSHST)
3220    .Case("osh",   ARM_MB::OSH)
3221    .Case("oshst", ARM_MB::OSHST)
3222    .Default(~0U);
3223
3224  if (Opt == ~0U)
3225    return MatchOperand_NoMatch;
3226
3227  Parser.Lex(); // Eat identifier token.
3228  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3229  return MatchOperand_Success;
3230}
3231
3232/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
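/// Accepts a combination of the letters 'a', 'i' and 'f' (e.g. the "if" in
/// "cpsid if"), or "none" for an empty set of flags.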
3233ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3234parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3235  SMLoc S = Parser.getTok().getLoc();
3236  const AsmToken &Tok = Parser.getTok();
3237  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3238  StringRef IFlagsStr = Tok.getString();
3239
3240  // An iflags string of "none" is interpreted to mean that none of the AIF
3241  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3242  unsigned IFlags = 0;
3243  if (IFlagsStr != "none") {
3244    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3245      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3246        .Case("a", ARM_PROC::A)
3247        .Case("i", ARM_PROC::I)
3248        .Case("f", ARM_PROC::F)
3249        .Default(~0U);
3250
3251      // If some specific iflag is already set, it means that some letter is
3252      // present more than once, which is not acceptable.
3253      if (Flag == ~0U || (IFlags & Flag))
3254        return MatchOperand_NoMatch;
3255
3256      IFlags |= Flag;
3257    }
3258  }
3259
3260  Parser.Lex(); // Eat identifier token.
3261  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3262  return MatchOperand_Success;
3263}
3264
3265/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
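/// For example, the "apsr_nzcvq" in "msr apsr_nzcvq, r0" or the "spsr_fc" in
/// "msr spsr_fc, r1". On M-class targets, system register names such as
/// "primask" and "basepri" are accepted instead.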
3266ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3267parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3268  SMLoc S = Parser.getTok().getLoc();
3269  const AsmToken &Tok = Parser.getTok();
3270  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3271  StringRef Mask = Tok.getString();
3272
3273  if (isMClass()) {
3274    // See ARMv6-M 10.1.1
3275    std::string Name = Mask.lower();
3276    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3277      .Case("apsr", 0)
3278      .Case("iapsr", 1)
3279      .Case("eapsr", 2)
3280      .Case("xpsr", 3)
3281      .Case("ipsr", 5)
3282      .Case("epsr", 6)
3283      .Case("iepsr", 7)
3284      .Case("msp", 8)
3285      .Case("psp", 9)
3286      .Case("primask", 16)
3287      .Case("basepri", 17)
3288      .Case("basepri_max", 18)
3289      .Case("faultmask", 19)
3290      .Case("control", 20)
3291      .Default(~0U);
3292
3293    if (FlagsVal == ~0U)
3294      return MatchOperand_NoMatch;
3295
3296    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3297      // basepri, basepri_max and faultmask only valid for V7m.
3298      return MatchOperand_NoMatch;
3299
3300    Parser.Lex(); // Eat identifier token.
3301    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3302    return MatchOperand_Success;
3303  }
3304
3305  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3306  size_t Start = 0, Next = Mask.find('_');
3307  StringRef Flags = "";
3308  std::string SpecReg = Mask.slice(Start, Next).lower();
3309  if (Next != StringRef::npos)
3310    Flags = Mask.slice(Next+1, Mask.size());
3311
3312  // FlagsVal contains the complete mask:
3313  // 3-0: Mask
3314  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3315  unsigned FlagsVal = 0;
3316
3317  if (SpecReg == "apsr") {
3318    FlagsVal = StringSwitch<unsigned>(Flags)
3319    .Case("nzcvq",  0x8) // same as CPSR_f
3320    .Case("g",      0x4) // same as CPSR_s
3321    .Case("nzcvqg", 0xc) // same as CPSR_fs
3322    .Default(~0U);
3323
3324    if (FlagsVal == ~0U) {
3325      if (!Flags.empty())
3326        return MatchOperand_NoMatch;
3327      else
3328        FlagsVal = 8; // No flag
3329    }
3330  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3331    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3332      Flags = "fc";
3333    for (int i = 0, e = Flags.size(); i != e; ++i) {
3334      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3335      .Case("c", 1)
3336      .Case("x", 2)
3337      .Case("s", 4)
3338      .Case("f", 8)
3339      .Default(~0U);
3340
3341      // If some specific flag is already set, it means that some letter is
3342      // present more than once, which is not acceptable.
3343      if (Flag == ~0U || (FlagsVal & Flag))
3344        return MatchOperand_NoMatch;
3345      FlagsVal |= Flag;
3346    }
3347  } else // No match for special register.
3348    return MatchOperand_NoMatch;
3349
3350  // Special register without flags is NOT equivalent to "fc" flags.
3351  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3352  // two lines would enable gas compatibility at the expense of breaking
3353  // round-tripping.
3354  //
3355  // if (!FlagsVal)
3356  //  FlagsVal = 0x9;
3357
3358  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3359  if (SpecReg == "spsr")
3360    FlagsVal |= 16;
3361
3362  Parser.Lex(); // Eat identifier token.
3363  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3364  return MatchOperand_Success;
3365}
3366
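/// parsePKHImm - Parse the shift operand of PKHBT/PKHTB, e.g. the "lsl #8" in
/// "pkhbt r0, r1, r2, lsl #8". 'Op' names the expected shift keyword and
/// [Low, High] bounds the immediate.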
3367ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3368parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3369            int Low, int High) {
3370  const AsmToken &Tok = Parser.getTok();
3371  if (Tok.isNot(AsmToken::Identifier)) {
3372    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3373    return MatchOperand_ParseFail;
3374  }
3375  StringRef ShiftName = Tok.getString();
3376  std::string LowerOp = Op.lower();
3377  std::string UpperOp = Op.upper();
3378  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3379    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3380    return MatchOperand_ParseFail;
3381  }
3382  Parser.Lex(); // Eat shift type token.
3383
3384  // There must be a '#' and a shift amount.
3385  if (Parser.getTok().isNot(AsmToken::Hash) &&
3386      Parser.getTok().isNot(AsmToken::Dollar)) {
3387    Error(Parser.getTok().getLoc(), "'#' expected");
3388    return MatchOperand_ParseFail;
3389  }
3390  Parser.Lex(); // Eat hash token.
3391
3392  const MCExpr *ShiftAmount;
3393  SMLoc Loc = Parser.getTok().getLoc();
3394  if (getParser().ParseExpression(ShiftAmount)) {
3395    Error(Loc, "illegal expression");
3396    return MatchOperand_ParseFail;
3397  }
3398  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3399  if (!CE) {
3400    Error(Loc, "constant expression expected");
3401    return MatchOperand_ParseFail;
3402  }
3403  int Val = CE->getValue();
3404  if (Val < Low || Val > High) {
3405    Error(Loc, "immediate value out of range");
3406    return MatchOperand_ParseFail;
3407  }
3408
3409  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3410
3411  return MatchOperand_Success;
3412}
3413
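/// parseSetEndImm - Parse the endianness operand of SETEND, i.e. the "be" or
/// "le" in "setend be" / "setend le", as an immediate (1 for big-endian,
/// 0 for little-endian).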
3414ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3415parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3416  const AsmToken &Tok = Parser.getTok();
3417  SMLoc S = Tok.getLoc();
3418  if (Tok.isNot(AsmToken::Identifier)) {
3419    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3420    return MatchOperand_ParseFail;
3421  }
3422  int Val = StringSwitch<int>(Tok.getString())
3423    .Case("be", 1)
3424    .Case("le", 0)
3425    .Default(-1);
3426  Parser.Lex(); // Eat the token.
3427
3428  if (Val == -1) {
3429    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3430    return MatchOperand_ParseFail;
3431  }
3432  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3433                                                                  getContext()),
3434                                           S, Parser.getTok().getLoc()));
3435  return MatchOperand_Success;
3436}
3437
3438/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3439/// instructions. Legal values are:
3440///     lsl #n  'n' in [0,31]
3441///     asr #n  'n' in [1,32]
3442///             n == 32 encoded as n == 0.
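/// For example, the "lsl #4" in "ssat r0, #8, r1, lsl #4".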
3443ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3444parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3445  const AsmToken &Tok = Parser.getTok();
3446  SMLoc S = Tok.getLoc();
3447  if (Tok.isNot(AsmToken::Identifier)) {
3448    Error(S, "shift operator 'asr' or 'lsl' expected");
3449    return MatchOperand_ParseFail;
3450  }
3451  StringRef ShiftName = Tok.getString();
3452  bool isASR;
3453  if (ShiftName == "lsl" || ShiftName == "LSL")
3454    isASR = false;
3455  else if (ShiftName == "asr" || ShiftName == "ASR")
3456    isASR = true;
3457  else {
3458    Error(S, "shift operator 'asr' or 'lsl' expected");
3459    return MatchOperand_ParseFail;
3460  }
3461  Parser.Lex(); // Eat the operator.
3462
3463  // A '#' and a shift amount.
3464  if (Parser.getTok().isNot(AsmToken::Hash) &&
3465      Parser.getTok().isNot(AsmToken::Dollar)) {
3466    Error(Parser.getTok().getLoc(), "'#' expected");
3467    return MatchOperand_ParseFail;
3468  }
3469  Parser.Lex(); // Eat hash token.
3470
3471  const MCExpr *ShiftAmount;
3472  SMLoc E = Parser.getTok().getLoc();
3473  if (getParser().ParseExpression(ShiftAmount)) {
3474    Error(E, "malformed shift expression");
3475    return MatchOperand_ParseFail;
3476  }
3477  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3478  if (!CE) {
3479    Error(E, "shift amount must be an immediate");
3480    return MatchOperand_ParseFail;
3481  }
3482
3483  int64_t Val = CE->getValue();
3484  if (isASR) {
3485    // Shift amount must be in [1,32]
3486    if (Val < 1 || Val > 32) {
3487      Error(E, "'asr' shift amount must be in range [1,32]");
3488      return MatchOperand_ParseFail;
3489    }
3490    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3491    if (isThumb() && Val == 32) {
3492      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3493      return MatchOperand_ParseFail;
3494    }
3495    if (Val == 32) Val = 0;
3496  } else {
3497    // Shift amount must be in [0,31]
3498    if (Val < 0 || Val > 31) {
3499      Error(E, "'lsl' shift amount must be in range [0,31]");
3500      return MatchOperand_ParseFail;
3501    }
3502  }
3503
3504  E = Parser.getTok().getLoc();
3505  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3506
3507  return MatchOperand_Success;
3508}
3509
3510/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3511/// of instructions. Legal values are:
3512///     ror #n  'n' in {0, 8, 16, 24}
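/// For example, the "ror #8" in "sxtb r0, r1, ror #8".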
3513ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3514parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3515  const AsmToken &Tok = Parser.getTok();
3516  SMLoc S = Tok.getLoc();
3517  if (Tok.isNot(AsmToken::Identifier))
3518    return MatchOperand_NoMatch;
3519  StringRef ShiftName = Tok.getString();
3520  if (ShiftName != "ror" && ShiftName != "ROR")
3521    return MatchOperand_NoMatch;
3522  Parser.Lex(); // Eat the operator.
3523
3524  // A '#' and a rotate amount.
3525  if (Parser.getTok().isNot(AsmToken::Hash) &&
3526      Parser.getTok().isNot(AsmToken::Dollar)) {
3527    Error(Parser.getTok().getLoc(), "'#' expected");
3528    return MatchOperand_ParseFail;
3529  }
3530  Parser.Lex(); // Eat hash token.
3531
3532  const MCExpr *ShiftAmount;
3533  SMLoc E = Parser.getTok().getLoc();
3534  if (getParser().ParseExpression(ShiftAmount)) {
3535    Error(E, "malformed rotate expression");
3536    return MatchOperand_ParseFail;
3537  }
3538  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3539  if (!CE) {
3540    Error(E, "rotate amount must be an immediate");
3541    return MatchOperand_ParseFail;
3542  }
3543
3544  int64_t Val = CE->getValue();
3545  // Rotate amount must be in {0, 8, 16, 24}; 0 is an undocumented extension,
3546  // as zero is normally represented in asm by omitting the rotate operand
3547  // entirely.
3548  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3549    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3550    return MatchOperand_ParseFail;
3551  }
3552
3553  E = Parser.getTok().getLoc();
3554  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3555
3556  return MatchOperand_Success;
3557}
3558
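/// parseBitfield - Parse the "#lsb, #width" operand pair of the bitfield
/// instructions (BFC/BFI), e.g. the "#4, #8" in "bfi r0, r1, #4, #8".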
3559ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3560parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3561  SMLoc S = Parser.getTok().getLoc();
3562  // The bitfield descriptor is really two operands, the LSB and the width.
3563  if (Parser.getTok().isNot(AsmToken::Hash) &&
3564      Parser.getTok().isNot(AsmToken::Dollar)) {
3565    Error(Parser.getTok().getLoc(), "'#' expected");
3566    return MatchOperand_ParseFail;
3567  }
3568  Parser.Lex(); // Eat hash token.
3569
3570  const MCExpr *LSBExpr;
3571  SMLoc E = Parser.getTok().getLoc();
3572  if (getParser().ParseExpression(LSBExpr)) {
3573    Error(E, "malformed immediate expression");
3574    return MatchOperand_ParseFail;
3575  }
3576  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3577  if (!CE) {
3578    Error(E, "'lsb' operand must be an immediate");
3579    return MatchOperand_ParseFail;
3580  }
3581
3582  int64_t LSB = CE->getValue();
3583  // The LSB must be in the range [0,31]
3584  if (LSB < 0 || LSB > 31) {
3585    Error(E, "'lsb' operand must be in the range [0,31]");
3586    return MatchOperand_ParseFail;
3587  }
3588  E = Parser.getTok().getLoc();
3589
3590  // Expect another immediate operand.
3591  if (Parser.getTok().isNot(AsmToken::Comma)) {
3592    Error(Parser.getTok().getLoc(), "too few operands");
3593    return MatchOperand_ParseFail;
3594  }
3595  Parser.Lex(); // Eat the comma.
3596  if (Parser.getTok().isNot(AsmToken::Hash) &&
3597      Parser.getTok().isNot(AsmToken::Dollar)) {
3598    Error(Parser.getTok().getLoc(), "'#' expected");
3599    return MatchOperand_ParseFail;
3600  }
3601  Parser.Lex(); // Eat hash token.
3602
3603  const MCExpr *WidthExpr;
3604  if (getParser().ParseExpression(WidthExpr)) {
3605    Error(E, "malformed immediate expression");
3606    return MatchOperand_ParseFail;
3607  }
3608  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3609  if (!CE) {
3610    Error(E, "'width' operand must be an immediate");
3611    return MatchOperand_ParseFail;
3612  }
3613
3614  int64_t Width = CE->getValue();
3615  // The width must be in the range [1,32-lsb]
3616  if (Width < 1 || Width > 32 - LSB) {
3617    Error(E, "'width' operand must be in the range [1,32-lsb]");
3618    return MatchOperand_ParseFail;
3619  }
3620  E = Parser.getTok().getLoc();
3621
3622  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3623
3624  return MatchOperand_Success;
3625}
3626
3627ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3628parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3629  // Check for a post-index addressing register operand. Specifically:
3630  // postidx_reg := '+' register {, shift}
3631  //              | '-' register {, shift}
3632  //              | register {, shift}
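  //
  // For example, the "r2, lsl #2" in "ldr r0, [r1], r2, lsl #2".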
3633
3634  // This method must return MatchOperand_NoMatch without consuming any tokens
3635  // in the case where there is no match, as other alternatives take other
3636  // parse methods.
3637  AsmToken Tok = Parser.getTok();
3638  SMLoc S = Tok.getLoc();
3639  bool haveEaten = false;
3640  bool isAdd = true;
3641  int Reg = -1;
3642  if (Tok.is(AsmToken::Plus)) {
3643    Parser.Lex(); // Eat the '+' token.
3644    haveEaten = true;
3645  } else if (Tok.is(AsmToken::Minus)) {
3646    Parser.Lex(); // Eat the '-' token.
3647    isAdd = false;
3648    haveEaten = true;
3649  }
3650  if (Parser.getTok().is(AsmToken::Identifier))
3651    Reg = tryParseRegister();
3652  if (Reg == -1) {
3653    if (!haveEaten)
3654      return MatchOperand_NoMatch;
3655    Error(Parser.getTok().getLoc(), "register expected");
3656    return MatchOperand_ParseFail;
3657  }
3658  SMLoc E = Parser.getTok().getLoc();
3659
3660  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3661  unsigned ShiftImm = 0;
3662  if (Parser.getTok().is(AsmToken::Comma)) {
3663    Parser.Lex(); // Eat the ','.
3664    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3665      return MatchOperand_ParseFail;
3666  }
3667
3668  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3669                                                  ShiftImm, S, E));
3670
3671  return MatchOperand_Success;
3672}
3673
3674ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3675parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3676  // Check for a post-index addressing register operand. Specifically:
3677  // am3offset := '+' register
3678  //              | '-' register
3679  //              | register
3680  //              | # imm
3681  //              | # + imm
3682  //              | # - imm
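  //
  // For example, the "#8" in "ldrd r0, r1, [r2], #8" or the "r3" in
  // "strh r0, [r1], r3".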
3683
3684  // This method must return MatchOperand_NoMatch without consuming any tokens
3685  // in the case where there is no match, as other alternatives take other
3686  // parse methods.
3687  AsmToken Tok = Parser.getTok();
3688  SMLoc S = Tok.getLoc();
3689
3690  // Do immediates first, as we always parse those if we have a '#'.
3691  if (Parser.getTok().is(AsmToken::Hash) ||
3692      Parser.getTok().is(AsmToken::Dollar)) {
3693    Parser.Lex(); // Eat the '#'.
3694    // Explicitly look for a '-', as we need to encode negative zero
3695    // differently.
3696    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3697    const MCExpr *Offset;
3698    if (getParser().ParseExpression(Offset))
3699      return MatchOperand_ParseFail;
3700    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3701    if (!CE) {
3702      Error(S, "constant expression expected");
3703      return MatchOperand_ParseFail;
3704    }
3705    SMLoc E = Tok.getLoc();
3706    // Negative zero is encoded as the flag value INT32_MIN.
3707    int32_t Val = CE->getValue();
3708    if (isNegative && Val == 0)
3709      Val = INT32_MIN;
3710
3711    Operands.push_back(
3712      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3713
3714    return MatchOperand_Success;
3715  }
3716
3717
3718  bool haveEaten = false;
3719  bool isAdd = true;
3720  int Reg = -1;
3721  if (Tok.is(AsmToken::Plus)) {
3722    Parser.Lex(); // Eat the '+' token.
3723    haveEaten = true;
3724  } else if (Tok.is(AsmToken::Minus)) {
3725    Parser.Lex(); // Eat the '-' token.
3726    isAdd = false;
3727    haveEaten = true;
3728  }
3729  if (Parser.getTok().is(AsmToken::Identifier))
3730    Reg = tryParseRegister();
3731  if (Reg == -1) {
3732    if (!haveEaten)
3733      return MatchOperand_NoMatch;
3734    Error(Parser.getTok().getLoc(), "register expected");
3735    return MatchOperand_ParseFail;
3736  }
3737  SMLoc E = Parser.getTok().getLoc();
3738
3739  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3740                                                  0, S, E));
3741
3742  return MatchOperand_Success;
3743}
3744
3745/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3746/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3747/// when they refer to multiple MIOperands inside a single one.
3748bool ARMAsmParser::
3749cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3750             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3751  // Rt, Rt2
3752  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3753  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3754  // Create a writeback register dummy placeholder.
3755  Inst.addOperand(MCOperand::CreateReg(0));
3756  // addr
3757  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3758  // pred
3759  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3760  return true;
3761}
3762
3763/// cvtT2StrdPre - Convert parsed operands to MCInst.
3764/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3765/// when they refer to multiple MIOperands inside a single one.
3766bool ARMAsmParser::
3767cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3768             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3769  // Create a writeback register dummy placeholder.
3770  Inst.addOperand(MCOperand::CreateReg(0));
3771  // Rt, Rt2
3772  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3773  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3774  // addr
3775  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3776  // pred
3777  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3778  return true;
3779}
3780
3781/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3782/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3783/// when they refer to multiple MIOperands inside a single one.
3784bool ARMAsmParser::
3785cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3786                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3787  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3788
3789  // Create a writeback register dummy placeholder.
3790  Inst.addOperand(MCOperand::CreateImm(0));
3791
3792  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3793  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3794  return true;
3795}
3796
3797/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3798/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3799/// when they refer to multiple MIOperands inside a single one.
3800bool ARMAsmParser::
3801cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3802                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3803  // Create a writeback register dummy placeholder.
3804  Inst.addOperand(MCOperand::CreateImm(0));
3805  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3806  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3807  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3808  return true;
3809}
3810
3811/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3812/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3813/// when they refer to multiple MIOperands inside a single one.
3814bool ARMAsmParser::
3815cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3816                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3817  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3818
3819  // Create a writeback register dummy placeholder.
3820  Inst.addOperand(MCOperand::CreateImm(0));
3821
3822  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3823  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3824  return true;
3825}
3826
3827/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3828/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3829/// when they refer to multiple MIOperands inside a single one.
3830bool ARMAsmParser::
3831cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3832                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3833  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3834
3835  // Create a writeback register dummy placeholder.
3836  Inst.addOperand(MCOperand::CreateImm(0));
3837
3838  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3839  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3840  return true;
3841}
3842
3843
3844/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3845/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3846/// when they refer to multiple MIOperands inside a single one.
3847bool ARMAsmParser::
3848cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3849                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3850  // Create a writeback register dummy placeholder.
3851  Inst.addOperand(MCOperand::CreateImm(0));
3852  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3853  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3854  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3855  return true;
3856}
3857
3858/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3859/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3860/// when they refer to multiple MIOperands inside a single one.
3861bool ARMAsmParser::
3862cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3863                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3864  // Create a writeback register dummy placeholder.
3865  Inst.addOperand(MCOperand::CreateImm(0));
3866  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3867  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3868  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3869  return true;
3870}
3871
3872/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3873/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3874/// when they refer to multiple MIOperands inside a single one.
3875bool ARMAsmParser::
3876cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3877                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3878  // Create a writeback register dummy placeholder.
3879  Inst.addOperand(MCOperand::CreateImm(0));
3880  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3881  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3882  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3883  return true;
3884}
3885
3886/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3887/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3888/// when they refer to multiple MIOperands inside a single one.
3889bool ARMAsmParser::
3890cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3891                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3892  // Rt
3893  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3894  // Create a writeback register dummy placeholder.
3895  Inst.addOperand(MCOperand::CreateImm(0));
3896  // addr
3897  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3898  // offset
3899  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3900  // pred
3901  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3902  return true;
3903}
3904
3905/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3906/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3907/// when they refer to multiple MIOperands inside a single one.
3908bool ARMAsmParser::
3909cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3910                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3911  // Rt
3912  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3913  // Create a writeback register dummy placeholder.
3914  Inst.addOperand(MCOperand::CreateImm(0));
3915  // addr
3916  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3917  // offset
3918  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3919  // pred
3920  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3921  return true;
3922}
3923
3924/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3925/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3926/// when they refer to multiple MIOperands inside a single one.
3927bool ARMAsmParser::
3928cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3929                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3930  // Create a writeback register dummy placeholder.
3931  Inst.addOperand(MCOperand::CreateImm(0));
3932  // Rt
3933  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3934  // addr
3935  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3936  // offset
3937  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3938  // pred
3939  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3940  return true;
3941}
3942
3943/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3944/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3945/// when they refer to multiple MIOperands inside a single one.
3946bool ARMAsmParser::
3947cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3948                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3949  // Create a writeback register dummy placeholder.
3950  Inst.addOperand(MCOperand::CreateImm(0));
3951  // Rt
3952  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3953  // addr
3954  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3955  // offset
3956  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3957  // pred
3958  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3959  return true;
3960}
3961
3962/// cvtLdrdPre - Convert parsed operands to MCInst.
3963/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3964/// when they refer to multiple MIOperands inside a single one.
3965bool ARMAsmParser::
3966cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3967           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3968  // Rt, Rt2
3969  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3970  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3971  // Create a writeback register dummy placeholder.
3972  Inst.addOperand(MCOperand::CreateImm(0));
3973  // addr
3974  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3975  // pred
3976  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3977  return true;
3978}
3979
3980/// cvtStrdPre - Convert parsed operands to MCInst.
3981/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3982/// when they refer to multiple MIOperands inside a single one.
3983bool ARMAsmParser::
3984cvtStrdPre(MCInst &Inst, unsigned Opcode,
3985           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3986  // Create a writeback register dummy placeholder.
3987  Inst.addOperand(MCOperand::CreateImm(0));
3988  // Rt, Rt2
3989  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3990  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3991  // addr
3992  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3993  // pred
3994  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3995  return true;
3996}
3997
3998/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3999/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4000/// when they refer to multiple MIOperands inside a single one.
4001bool ARMAsmParser::
4002cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
4003                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4004  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4005  // Create a writeback register dummy placeholder.
4006  Inst.addOperand(MCOperand::CreateImm(0));
4007  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4008  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4009  return true;
4010}
4011
4012/// cvtThumbMultiply - Convert parsed operands to MCInst.
4013/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4014/// when they refer to multiple MIOperands inside a single one.
4015bool ARMAsmParser::
4016cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4017           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4018  // The destination register must be the same as one of the source
4019  // registers.
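  // For example, "muls r0, r1, r0" satisfies this; "muls r0, r1, r2" does not.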
4020  if (Operands.size() == 6 &&
4021      (((ARMOperand*)Operands[3])->getReg() !=
4022       ((ARMOperand*)Operands[5])->getReg()) &&
4023      (((ARMOperand*)Operands[3])->getReg() !=
4024       ((ARMOperand*)Operands[4])->getReg())) {
4025    Error(Operands[3]->getStartLoc(),
4026          "destination register must match source register");
4027    return false;
4028  }
4029  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4030  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4031  // If we have a three-operand form, make sure to set Rn to be the operand
4032  // that isn't the same as Rd.
4033  unsigned RegOp = 4;
4034  if (Operands.size() == 6 &&
4035      ((ARMOperand*)Operands[4])->getReg() ==
4036        ((ARMOperand*)Operands[3])->getReg())
4037    RegOp = 5;
4038  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4039  Inst.addOperand(Inst.getOperand(0));
4040  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4041
4042  return true;
4043}
4044
4045bool ARMAsmParser::
4046cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4047              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4048  // Vd
4049  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4050  // Create a writeback register dummy placeholder.
4051  Inst.addOperand(MCOperand::CreateImm(0));
4052  // Vn
4053  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4054  // pred
4055  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4056  return true;
4057}
4058
4059bool ARMAsmParser::
4060cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4061                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4062  // Vd
4063  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4064  // Create a writeback register dummy placeholder.
4065  Inst.addOperand(MCOperand::CreateImm(0));
4066  // Vn
4067  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4068  // Vm
4069  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4070  // pred
4071  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4072  return true;
4073}
4074
4075bool ARMAsmParser::
4076cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4077              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4078  // Create a writeback register dummy placeholder.
4079  Inst.addOperand(MCOperand::CreateImm(0));
4080  // Vn
4081  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4082  // Vt
4083  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4084  // pred
4085  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4086  return true;
4087}
4088
4089bool ARMAsmParser::
4090cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4091                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4092  // Create a writeback register dummy placeholder.
4093  Inst.addOperand(MCOperand::CreateImm(0));
4094  // Vn
4095  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4096  // Vm
4097  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4098  // Vt
4099  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4100  // pred
4101  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4102  return true;
4103}
4104
4105/// Parse an ARM memory expression. Return false if successful, true (with an
4106/// error emitted) otherwise.  The first token must be a '[' when called.
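/// Handles forms such as "[r0]", "[r0, #4]!", "[r0, r1, lsl #2]" and the
/// alignment syntax "[r0, :128]".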
4107bool ARMAsmParser::
4108parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4109  SMLoc S, E;
4110  assert(Parser.getTok().is(AsmToken::LBrac) &&
4111         "Token is not a Left Bracket");
4112  S = Parser.getTok().getLoc();
4113  Parser.Lex(); // Eat left bracket token.
4114
4115  const AsmToken &BaseRegTok = Parser.getTok();
4116  int BaseRegNum = tryParseRegister();
4117  if (BaseRegNum == -1)
4118    return Error(BaseRegTok.getLoc(), "register expected");
4119
4120  // The next token must either be a comma or a closing bracket.
4121  const AsmToken &Tok = Parser.getTok();
4122  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4123    return Error(Tok.getLoc(), "malformed memory operand");
4124
4125  if (Tok.is(AsmToken::RBrac)) {
4126    E = Tok.getLoc();
4127    Parser.Lex(); // Eat right bracket token.
4128
4129    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4130                                             0, 0, false, S, E));
4131
4132    // If there's a pre-indexing writeback marker, '!', just add it as a token
4133    // operand. It's rather odd, but syntactically valid.
4134    if (Parser.getTok().is(AsmToken::Exclaim)) {
4135      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4136      Parser.Lex(); // Eat the '!'.
4137    }
4138
4139    return false;
4140  }
4141
4142  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4143  Parser.Lex(); // Eat the comma.
4144
4145  // If we have a ':', it's an alignment specifier.
4146  if (Parser.getTok().is(AsmToken::Colon)) {
4147    Parser.Lex(); // Eat the ':'.
4148    E = Parser.getTok().getLoc();
4149
4150    const MCExpr *Expr;
4151    if (getParser().ParseExpression(Expr))
4152     return true;
4153
4154    // The expression has to be a constant. Memory references with relocations
4155    // don't come through here, as they use the <label> forms of the relevant
4156    // instructions.
4157    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4158    if (!CE)
4159      return Error (E, "constant expression expected");
4160
4161    unsigned Align = 0;
4162    switch (CE->getValue()) {
4163    default:
4164      return Error(E,
4165                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4166    case 16:  Align = 2; break;
4167    case 32:  Align = 4; break;
4168    case 64:  Align = 8; break;
4169    case 128: Align = 16; break;
4170    case 256: Align = 32; break;
4171    }
4172
4173    // Now we should have the closing ']'
4174    E = Parser.getTok().getLoc();
4175    if (Parser.getTok().isNot(AsmToken::RBrac))
4176      return Error(E, "']' expected");
4177    Parser.Lex(); // Eat right bracket token.
4178
4179    // Don't worry about range checking the value here. That's handled by
4180    // the is*() predicates.
4181    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4182                                             ARM_AM::no_shift, 0, Align,
4183                                             false, S, E));
4184
4185    // If there's a pre-indexing writeback marker, '!', just add it as a token
4186    // operand.
4187    if (Parser.getTok().is(AsmToken::Exclaim)) {
4188      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4189      Parser.Lex(); // Eat the '!'.
4190    }
4191
4192    return false;
4193  }
4194
4195  // If we have a '#', it's an immediate offset, else assume it's a register
4196  // offset. Be friendly and also accept a plain integer (without a leading
4197  // hash) for gas compatibility.
4198  if (Parser.getTok().is(AsmToken::Hash) ||
4199      Parser.getTok().is(AsmToken::Dollar) ||
4200      Parser.getTok().is(AsmToken::Integer)) {
4201    if (Parser.getTok().isNot(AsmToken::Integer))
4202      Parser.Lex(); // Eat the '#'.
4203    E = Parser.getTok().getLoc();
4204
4205    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4206    const MCExpr *Offset;
4207    if (getParser().ParseExpression(Offset))
4208     return true;
4209
4210    // The expression has to be a constant. Memory references with relocations
4211    // don't come through here, as they use the <label> forms of the relevant
4212    // instructions.
4213    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4214    if (!CE)
4215      return Error (E, "constant expression expected");
4216
4217    // If the constant was #-0, represent it as INT32_MIN.
4218    int32_t Val = CE->getValue();
4219    if (isNegative && Val == 0)
4220      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4221
4222    // Now we should have the closing ']'
4223    E = Parser.getTok().getLoc();
4224    if (Parser.getTok().isNot(AsmToken::RBrac))
4225      return Error(E, "']' expected");
4226    Parser.Lex(); // Eat right bracket token.
4227
4228    // Don't worry about range checking the value here. That's handled by
4229    // the is*() predicates.
4230    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4231                                             ARM_AM::no_shift, 0, 0,
4232                                             false, S, E));
4233
4234    // If there's a pre-indexing writeback marker, '!', just add it as a token
4235    // operand.
4236    if (Parser.getTok().is(AsmToken::Exclaim)) {
4237      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4238      Parser.Lex(); // Eat the '!'.
4239    }
4240
4241    return false;
4242  }
4243
4244  // The register offset is optionally preceded by a '+' or '-'
4245  bool isNegative = false;
4246  if (Parser.getTok().is(AsmToken::Minus)) {
4247    isNegative = true;
4248    Parser.Lex(); // Eat the '-'.
4249  } else if (Parser.getTok().is(AsmToken::Plus)) {
4250    // Nothing to do.
4251    Parser.Lex(); // Eat the '+'.
4252  }
4253
4254  E = Parser.getTok().getLoc();
4255  int OffsetRegNum = tryParseRegister();
4256  if (OffsetRegNum == -1)
4257    return Error(E, "register expected");
4258
4259  // If there's a shift operator, handle it.
4260  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4261  unsigned ShiftImm = 0;
4262  if (Parser.getTok().is(AsmToken::Comma)) {
4263    Parser.Lex(); // Eat the ','.
4264    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4265      return true;
4266  }
4267
4268  // Now we should have the closing ']'
4269  E = Parser.getTok().getLoc();
4270  if (Parser.getTok().isNot(AsmToken::RBrac))
4271    return Error(E, "']' expected");
4272  Parser.Lex(); // Eat right bracket token.
4273
4274  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4275                                           ShiftType, ShiftImm, 0, isNegative,
4276                                           S, E));
4277
4278  // If there's a pre-indexing writeback marker, '!', just add it as a token
4279  // operand.
4280  if (Parser.getTok().is(AsmToken::Exclaim)) {
4281    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4282    Parser.Lex(); // Eat the '!'.
4283  }
4284
4285  return false;
4286}
4287
4288/// parseMemRegOffsetShift - one of these two:
4289///   ( lsl | lsr | asr | ror ) , # shift_amount
4290///   rrx
4291/// Return false if a shift is parsed successfully, true on error.
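/// For example, the "lsl #2" in "[r0, r1, lsl #2]", or a bare "rrx".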
4292bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4293                                          unsigned &Amount) {
4294  SMLoc Loc = Parser.getTok().getLoc();
4295  const AsmToken &Tok = Parser.getTok();
4296  if (Tok.isNot(AsmToken::Identifier))
4297    return true;
4298  StringRef ShiftName = Tok.getString();
4299  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4300      ShiftName == "asl" || ShiftName == "ASL")
4301    St = ARM_AM::lsl;
4302  else if (ShiftName == "lsr" || ShiftName == "LSR")
4303    St = ARM_AM::lsr;
4304  else if (ShiftName == "asr" || ShiftName == "ASR")
4305    St = ARM_AM::asr;
4306  else if (ShiftName == "ror" || ShiftName == "ROR")
4307    St = ARM_AM::ror;
4308  else if (ShiftName == "rrx" || ShiftName == "RRX")
4309    St = ARM_AM::rrx;
4310  else
4311    return Error(Loc, "illegal shift operator");
4312  Parser.Lex(); // Eat shift type token.
4313
4314  // rrx stands alone.
4315  Amount = 0;
4316  if (St != ARM_AM::rrx) {
4317    Loc = Parser.getTok().getLoc();
4318    // A '#' and a shift amount.
4319    const AsmToken &HashTok = Parser.getTok();
4320    if (HashTok.isNot(AsmToken::Hash) &&
4321        HashTok.isNot(AsmToken::Dollar))
4322      return Error(HashTok.getLoc(), "'#' expected");
4323    Parser.Lex(); // Eat hash token.
4324
4325    const MCExpr *Expr;
4326    if (getParser().ParseExpression(Expr))
4327      return true;
4328    // Range check the immediate.
4329    // lsl, ror: 0 <= imm <= 31
4330    // lsr, asr: 0 <= imm <= 32
4331    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4332    if (!CE)
4333      return Error(Loc, "shift amount must be an immediate");
4334    int64_t Imm = CE->getValue();
4335    if (Imm < 0 ||
4336        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4337        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4338      return Error(Loc, "immediate shift value out of range");
4339    Amount = Imm;
4340  }
4341
4342  return false;
4343}
4344
4345/// parseFPImm - A floating point immediate expression operand.
4346ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4347parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4348  // Anything that can accept a floating point constant as an operand
4349  // needs to go through here, as the regular ParseExpression is
4350  // integer only.
4351  //
4352  // This routine still creates a generic Immediate operand, containing
4353  // a bitcast of the 64-bit floating point value. The various operands
4354  // that accept floats can check whether the value is valid for them
4355  // via the standard is*() predicates.
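  // For example, 'vmov.f32 s0, #1.0' and 'vmov.f64 d0, #-2.5' come through
  // here.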
4356
4357  SMLoc S = Parser.getTok().getLoc();
4358
4359  if (Parser.getTok().isNot(AsmToken::Hash) &&
4360      Parser.getTok().isNot(AsmToken::Dollar))
4361    return MatchOperand_NoMatch;
4362
4363  // Disambiguate the VMOV forms that can accept an FP immediate.
4364  // vmov.f32 <sreg>, #imm
4365  // vmov.f64 <dreg>, #imm
4366  // vmov.f32 <dreg>, #imm  @ vector f32x2
4367  // vmov.f32 <qreg>, #imm  @ vector f32x4
4368  //
4369  // There are also the NEON VMOV instructions which expect an
4370  // integer constant. Make sure we don't try to parse an FPImm
4371  // for these:
4372  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4373  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4374  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4375                           TyOp->getToken() != ".f64"))
4376    return MatchOperand_NoMatch;
4377
4378  Parser.Lex(); // Eat the '#'.
4379
4380  // Handle negation, as that still comes through as a separate token.
4381  bool isNegative = false;
4382  if (Parser.getTok().is(AsmToken::Minus)) {
4383    isNegative = true;
4384    Parser.Lex();
4385  }
4386  const AsmToken &Tok = Parser.getTok();
4387  SMLoc Loc = Tok.getLoc();
4388  if (Tok.is(AsmToken::Real)) {
4389    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4390    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4391    // If we had a '-' in front, toggle the sign bit.
4392    IntVal ^= (uint64_t)isNegative << 31;
4393    Parser.Lex(); // Eat the token.
4394    Operands.push_back(ARMOperand::CreateImm(
4395          MCConstantExpr::Create(IntVal, getContext()),
4396          S, Parser.getTok().getLoc()));
4397    return MatchOperand_Success;
4398  }
4399  // Also handle plain integers. Instructions which allow floating point
4400  // immediates also allow a raw encoded 8-bit value.
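  // e.g. 'vmov.f32 s0, #112' passes the raw imm8 encoding, which is decoded
  // below via ARM_AM::getFPImmFloat.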
4401  if (Tok.is(AsmToken::Integer)) {
4402    int64_t Val = Tok.getIntVal();
4403    Parser.Lex(); // Eat the token.
4404    if (Val > 255 || Val < 0) {
4405      Error(Loc, "encoded floating point value out of range");
4406      return MatchOperand_ParseFail;
4407    }
4408    double RealVal = ARM_AM::getFPImmFloat(Val);
4409    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4410    Operands.push_back(ARMOperand::CreateImm(
4411        MCConstantExpr::Create(Val, getContext()), S,
4412        Parser.getTok().getLoc()));
4413    return MatchOperand_Success;
4414  }
4415
4416  Error(Loc, "invalid floating point immediate");
4417  return MatchOperand_ParseFail;
4418}
4419
4420/// Parse an ARM instruction operand. For now this parses the operand regardless
4421/// of the mnemonic.
4422bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4423                                StringRef Mnemonic) {
4424  SMLoc S, E;
4425
4426  // Check if the current operand has a custom associated parser, if so, try to
4427  // custom parse the operand, or fallback to the general approach.
4428  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4429  if (ResTy == MatchOperand_Success)
4430    return false;
4431  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4432  // there was a match, but an error occurred, in which case, just return that
4433  // the operand parsing failed.
4434  if (ResTy == MatchOperand_ParseFail)
4435    return true;
4436
4437  switch (getLexer().getKind()) {
4438  default:
4439    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4440    return true;
4441  case AsmToken::Identifier: {
4442    if (!tryParseRegisterWithWriteBack(Operands))
4443      return false;
4444    int Res = tryParseShiftRegister(Operands);
4445    if (Res == 0) // success
4446      return false;
4447    else if (Res == -1) // irrecoverable error
4448      return true;
4449    // If this is VMRS, check for the apsr_nzcv operand.
4450    if (Mnemonic == "vmrs" &&
4451        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4452      S = Parser.getTok().getLoc();
4453      Parser.Lex();
4454      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4455      return false;
4456    }
4457
4458    // Fall through for the Identifier case that is not a register or a
4459    // special name.
4460  }
4461  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4462  case AsmToken::Integer: // things like 1f and 2b as branch targets
4463  case AsmToken::String:  // quoted label names.
4464  case AsmToken::Dot: {   // . as a branch target
4465    // This was not a register so parse other operands that start with an
4466    // identifier (like labels) as expressions and create them as immediates.
4467    const MCExpr *IdVal;
4468    S = Parser.getTok().getLoc();
4469    if (getParser().ParseExpression(IdVal))
4470      return true;
4471    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4472    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4473    return false;
4474  }
4475  case AsmToken::LBrac:
4476    return parseMemory(Operands);
4477  case AsmToken::LCurly:
4478    return parseRegisterList(Operands);
4479  case AsmToken::Dollar:
4480  case AsmToken::Hash: {
4481    // #42 -> immediate.
4482    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4483    S = Parser.getTok().getLoc();
4484    Parser.Lex();
4485    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4486    const MCExpr *ImmVal;
4487    if (getParser().ParseExpression(ImmVal))
4488      return true;
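    // If the constant was #-0, represent it as INT32_MIN so the sign is not
    // lost (this mirrors the handling in parseMemory).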
4489    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4490    if (CE) {
4491      int32_t Val = CE->getValue();
4492      if (isNegative && Val == 0)
4493        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4494    }
4495    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4496    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4497    return false;
4498  }
4499  case AsmToken::Colon: {
4500    // ":lower16:" and ":upper16:" expression prefixes
4501    // FIXME: Check it's an expression prefix,
4502    // e.g. (FOO - :lower16:BAR) isn't legal.
4503    ARMMCExpr::VariantKind RefKind;
4504    if (parsePrefix(RefKind))
4505      return true;
4506
4507    const MCExpr *SubExprVal;
4508    if (getParser().ParseExpression(SubExprVal))
4509      return true;
4510
4511    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4512                                                   getContext());
4513    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4514    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4515    return false;
4516  }
4517  }
4518}
4519
4520// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4521//  :lower16: and :upper16:.
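//  e.g. 'movw r0, #:lower16:sym' and 'movt r0, #:upper16:sym'.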
4522bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4523  RefKind = ARMMCExpr::VK_ARM_None;
4524
4525  // :lower16: and :upper16: modifiers
4526  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4527  Parser.Lex(); // Eat ':'
4528
4529  if (getLexer().isNot(AsmToken::Identifier)) {
4530    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4531    return true;
4532  }
4533
4534  StringRef IDVal = Parser.getTok().getIdentifier();
4535  if (IDVal == "lower16") {
4536    RefKind = ARMMCExpr::VK_ARM_LO16;
4537  } else if (IDVal == "upper16") {
4538    RefKind = ARMMCExpr::VK_ARM_HI16;
4539  } else {
4540    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4541    return true;
4542  }
4543  Parser.Lex();
4544
4545  if (getLexer().isNot(AsmToken::Colon)) {
4546    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4547    return true;
4548  }
4549  Parser.Lex(); // Eat the last ':'
4550  return false;
4551}
4552
4553/// \brief Given a mnemonic, split out possible predication code and carry
4554/// setting letters to form a canonical mnemonic and flags.
4555//
4556// FIXME: Would be nice to autogen this.
4557// FIXME: This is a bit of a maze of special cases.
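// For example, 'addseq' splits into mnemonic 'add' with CarrySetting set and
// PredicationCode == ARMCC::EQ, while 'cpsie' splits into 'cps' with
// ProcessorIMod == ARM_PROC::IE.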
4558StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4559                                      unsigned &PredicationCode,
4560                                      bool &CarrySetting,
4561                                      unsigned &ProcessorIMod,
4562                                      StringRef &ITMask) {
4563  PredicationCode = ARMCC::AL;
4564  CarrySetting = false;
4565  ProcessorIMod = 0;
4566
4567  // Ignore some mnemonics we know aren't predicated forms.
4568  //
4569  // FIXME: Would be nice to autogen this.
4570  if ((Mnemonic == "movs" && isThumb()) ||
4571      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4572      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4573      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4574      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4575      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4576      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4577      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4578      Mnemonic == "fmuls")
4579    return Mnemonic;
4580
4581  // First, split out any predication code. Ignore mnemonics we know aren't
4582  // predicated but do have a carry-set and so weren't caught above.
4583  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4584      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4585      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4586      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4587    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4588      .Case("eq", ARMCC::EQ)
4589      .Case("ne", ARMCC::NE)
4590      .Case("hs", ARMCC::HS)
4591      .Case("cs", ARMCC::HS)
4592      .Case("lo", ARMCC::LO)
4593      .Case("cc", ARMCC::LO)
4594      .Case("mi", ARMCC::MI)
4595      .Case("pl", ARMCC::PL)
4596      .Case("vs", ARMCC::VS)
4597      .Case("vc", ARMCC::VC)
4598      .Case("hi", ARMCC::HI)
4599      .Case("ls", ARMCC::LS)
4600      .Case("ge", ARMCC::GE)
4601      .Case("lt", ARMCC::LT)
4602      .Case("gt", ARMCC::GT)
4603      .Case("le", ARMCC::LE)
4604      .Case("al", ARMCC::AL)
4605      .Default(~0U);
4606    if (CC != ~0U) {
4607      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4608      PredicationCode = CC;
4609    }
4610  }
4611
4612  // Next, determine if we have a carry setting bit. We explicitly ignore all
4613  // the instructions we know end in 's'.
4614  if (Mnemonic.endswith("s") &&
4615      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4616        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4617        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4618        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4619        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4620        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4621        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4622        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4623        (Mnemonic == "movs" && isThumb()))) {
4624    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4625    CarrySetting = true;
4626  }
4627
4628  // The "cps" instruction can have an interrupt mode operand which is glued into
4629  // the mnemonic. Check if this is the case; split it out and parse the imod operand.
4630  if (Mnemonic.startswith("cps")) {
4631    // Split out any imod code.
4632    unsigned IMod =
4633      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4634      .Case("ie", ARM_PROC::IE)
4635      .Case("id", ARM_PROC::ID)
4636      .Default(~0U);
4637    if (IMod != ~0U) {
4638      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4639      ProcessorIMod = IMod;
4640    }
4641  }
4642
4643  // The "it" instruction has the condition mask on the end of the mnemonic.
4644  if (Mnemonic.startswith("it")) {
4645    ITMask = Mnemonic.slice(2, Mnemonic.size());
4646    Mnemonic = Mnemonic.slice(0, 2);
4647  }
4648
4649  return Mnemonic;
4650}
4651
4652/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4653/// inclusion of carry set or predication code operands.
4654//
4655// FIXME: It would be nice to autogen this.
4656void ARMAsmParser::
4657getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4658                      bool &CanAcceptPredicationCode) {
4659  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4660      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4661      Mnemonic == "add" || Mnemonic == "adc" ||
4662      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4663      Mnemonic == "orr" || Mnemonic == "mvn" ||
4664      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4665      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4666      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4667                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4668                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4669    CanAcceptCarrySet = true;
4670  } else
4671    CanAcceptCarrySet = false;
4672
4673  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4674      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4675      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4676      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4677      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4678      (Mnemonic == "clrex" && !isThumb()) ||
4679      (Mnemonic == "nop" && isThumbOne()) ||
4680      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4681        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4682        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4683      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4684       !isThumb()) ||
4685      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4686    CanAcceptPredicationCode = false;
4687  } else
4688    CanAcceptPredicationCode = true;
4689
4690  if (isThumb()) {
4691    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4692        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4693      CanAcceptPredicationCode = false;
4694  }
4695}
4696
4697bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4698                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4699  // FIXME: This is all horribly hacky. We really need a better way to deal
4700  // with optional operands like this in the matcher table.
4701
4702  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4703  // another does not. Specifically, the MOVW instruction does not. So we
4704  // special case it here and remove the defaulted (non-setting) cc_out
4705  // operand if that's the instruction we're trying to match.
4706  //
4707  // We do this as post-processing of the explicit operands rather than just
4708  // conditionally adding the cc_out in the first place because we need
4709  // to check the type of the parsed immediate operand.
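  // For example, 'mov r0, #0x1234' can only be MOVW (0x1234 is not a valid
  // modified-immediate but does fit in 16 bits), so the cc_out must go.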
4710  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4711      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4712      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4713      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4714    return true;
4715
4716  // Register-register 'add' for thumb does not have a cc_out operand
4717  // when there are only two register operands.
4718  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4719      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4720      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4721      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4722    return true;
4723  // Register-register 'add' for thumb does not have a cc_out operand
4724  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4725  // have to check the immediate range here since Thumb2 has a variant
4726  // that can handle a different range and has a cc_out operand.
4727  if (((isThumb() && Mnemonic == "add") ||
4728       (isThumbTwo() && Mnemonic == "sub")) &&
4729      Operands.size() == 6 &&
4730      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4731      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4732      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4733      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4734      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4735       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4736    return true;
4737  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4738  // imm0_4095 variant. That's the least-preferred variant when
4739  // selecting via the generic "add" mnemonic, so to know that we
4740  // should remove the cc_out operand, we have to explicitly check that
4741  // it's not one of the other variants. Ugh.
4742  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4743      Operands.size() == 6 &&
4744      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4745      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4746      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4747    // Nest conditions rather than one big 'if' statement for readability.
4748    //
4749    // If either register is a high reg, it's either one of the SP
4750    // variants (handled above) or a 32-bit encoding, so we just
4751    // check against T3. If the second register is the PC, this is an
4752    // alternate form of ADR, which uses encoding T4, so check for that too.
4753    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4754         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4755        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4756        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4757      return false;
4758    // If both registers are low, we're in an IT block, and the immediate is
4759    // in range, we should use encoding T1 instead, which has a cc_out.
4760    if (inITBlock() &&
4761        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4762        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4763        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4764      return false;
4765
4766    // Otherwise, we use encoding T4, which does not have a cc_out
4767    // operand.
4768    return true;
4769  }
4770
4771  // The thumb2 multiply instruction doesn't have a CCOut register, so
4772  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4773  // use the 16-bit encoding or not.
4774  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4775      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4776      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4777      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4778      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4779      // If the registers aren't low regs, the destination reg isn't the
4780      // same as one of the source regs, or the cc_out operand is zero
4781      // outside of an IT block, we have to use the 32-bit encoding, so
4782      // remove the cc_out operand.
4783      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4784       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4785       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4786       !inITBlock() ||
4787       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4788        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4789        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4790        static_cast<ARMOperand*>(Operands[4])->getReg())))
4791    return true;
4792
4793  // Also check the 'mul' syntax variant that doesn't specify an explicit
4794  // destination register.
4795  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4796      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4797      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4798      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4799      // If the registers aren't low regs or the cc_out operand is zero
4800      // outside of an IT block, we have to use the 32-bit encoding, so
4801      // remove the cc_out operand.
4802      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4803       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4804       !inITBlock()))
4805    return true;
4806
4809  // Register-register 'add/sub' for thumb does not have a cc_out operand
4810  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4811  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4812  // right, this will result in better diagnostics (which operand is off)
4813  // anyway.
4814  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4815      (Operands.size() == 5 || Operands.size() == 6) &&
4816      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4817      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4818      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4819    return true;
4820
4821  return false;
4822}
4823
4824static bool isDataTypeToken(StringRef Tok) {
4825  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4826    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4827    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4828    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4829    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4830    Tok == ".f" || Tok == ".d";
4831}
4832
4833// FIXME: This bit should probably be handled via an explicit match class
4834// in the .td files that matches the suffix instead of having it be
4835// a literal string token the way it is now.
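// e.g. the '.64' in 'vldm.64 r0, {d0-d3}' is accepted but ignored.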
4836static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4837  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4838}
4839
4840static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4841/// Parse an ARM instruction mnemonic followed by its operands.
4842bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4843                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4844  // Apply mnemonic aliases before doing anything else, as the destination
4845  // mnemonic may include suffixes and we want to handle them normally.
4846  // The generic tblgen'erated code does this later, at the start of
4847  // MatchInstructionImpl(), but that's too late for aliases that include
4848  // any sort of suffix.
4849  unsigned AvailableFeatures = getAvailableFeatures();
4850  applyMnemonicAliases(Name, AvailableFeatures);
4851
4852  // First check for the ARM-specific .req directive.
4853  if (Parser.getTok().is(AsmToken::Identifier) &&
4854      Parser.getTok().getIdentifier() == ".req") {
4855    parseDirectiveReq(Name, NameLoc);
4856    // We always return 'error' for this, as we're done with this
4857    // statement and don't need to match the instruction.
4858    return true;
4859  }
4860
4861  // Create the leading tokens for the mnemonic, split by '.' characters.
4862  size_t Start = 0, Next = Name.find('.');
4863  StringRef Mnemonic = Name.slice(Start, Next);
4864
4865  // Split out the predication code and carry setting flag from the mnemonic.
4866  unsigned PredicationCode;
4867  unsigned ProcessorIMod;
4868  bool CarrySetting;
4869  StringRef ITMask;
4870  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4871                           ProcessorIMod, ITMask);
4872
4873  // In Thumb1, only the branch (B) instruction can be predicated.
4874  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4875    Parser.EatToEndOfStatement();
4876    return Error(NameLoc, "conditional execution not supported in Thumb1");
4877  }
4878
4879  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4880
4881  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4882  // is the mask as it will be for the IT encoding if the conditional
4883  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4884  // where the conditional bit0 is zero, the instruction post-processing
4885  // will adjust the mask accordingly.
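  // For example, 'itte' gives ITMask == "te" and a Mask of 0b1010: bit 3 set
  // for the 't', bit 2 clear for the 'e', and the trailing '1' marking the end
  // of the block (so 4 - trailingzeros == 3 instructions are in the block).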
4886  if (Mnemonic == "it") {
4887    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4888    if (ITMask.size() > 3) {
4889      Parser.EatToEndOfStatement();
4890      return Error(Loc, "too many conditions on IT instruction");
4891    }
4892    unsigned Mask = 8;
4893    for (unsigned i = ITMask.size(); i != 0; --i) {
4894      char pos = ITMask[i - 1];
4895      if (pos != 't' && pos != 'e') {
4896        Parser.EatToEndOfStatement();
4897        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4898      }
4899      Mask >>= 1;
4900      if (ITMask[i - 1] == 't')
4901        Mask |= 8;
4902    }
4903    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4904  }
4905
4906  // FIXME: This is all a pretty gross hack. We should automatically handle
4907  // optional operands like this via tblgen.
4908
4909  // Next, add the CCOut and ConditionCode operands, if needed.
4910  //
4911  // For mnemonics which can ever incorporate a carry setting bit or predication
4912  // code, our matching model involves us always generating CCOut and
4913  // ConditionCode operands to match the mnemonic "as written" and then we let
4914  // the matcher deal with finding the right instruction or generating an
4915  // appropriate error.
4916  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4917  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4918
4919  // If we had a carry-set on an instruction that can't do that, issue an
4920  // error.
4921  if (!CanAcceptCarrySet && CarrySetting) {
4922    Parser.EatToEndOfStatement();
4923    return Error(NameLoc, "instruction '" + Mnemonic +
4924                 "' can not set flags, but 's' suffix specified");
4925  }
4926  // If we had a predication code on an instruction that can't do that, issue an
4927  // error.
4928  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4929    Parser.EatToEndOfStatement();
4930    return Error(NameLoc, "instruction '" + Mnemonic +
4931                 "' is not predicable, but condition code specified");
4932  }
4933
4934  // Add the carry setting operand, if necessary.
4935  if (CanAcceptCarrySet) {
4936    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4937    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4938                                               Loc));
4939  }
4940
4941  // Add the predication code operand, if necessary.
4942  if (CanAcceptPredicationCode) {
4943    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4944                                      CarrySetting);
4945    Operands.push_back(ARMOperand::CreateCondCode(
4946                         ARMCC::CondCodes(PredicationCode), Loc));
4947  }
4948
4949  // Add the processor imod operand, if necessary.
4950  if (ProcessorIMod) {
4951    Operands.push_back(ARMOperand::CreateImm(
4952          MCConstantExpr::Create(ProcessorIMod, getContext()),
4953                                 NameLoc, NameLoc));
4954  }
4955
4956  // Add the remaining tokens in the mnemonic.
4957  while (Next != StringRef::npos) {
4958    Start = Next;
4959    Next = Name.find('.', Start + 1);
4960    StringRef ExtraToken = Name.slice(Start, Next);
4961
4962    // Some NEON instructions have an optional datatype suffix that is
4963    // completely ignored. Check for that.
4964    if (isDataTypeToken(ExtraToken) &&
4965        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4966      continue;
4967
4968    if (ExtraToken != ".n") {
4969      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4970      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4971    }
4972  }
4973
4974  // Read the remaining operands.
4975  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4976    // Read the first operand.
4977    if (parseOperand(Operands, Mnemonic)) {
4978      Parser.EatToEndOfStatement();
4979      return true;
4980    }
4981
4982    while (getLexer().is(AsmToken::Comma)) {
4983      Parser.Lex();  // Eat the comma.
4984
4985      // Parse and remember the operand.
4986      if (parseOperand(Operands, Mnemonic)) {
4987        Parser.EatToEndOfStatement();
4988        return true;
4989      }
4990    }
4991  }
4992
4993  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4994    SMLoc Loc = getLexer().getLoc();
4995    Parser.EatToEndOfStatement();
4996    return Error(Loc, "unexpected token in argument list");
4997  }
4998
4999  Parser.Lex(); // Consume the EndOfStatement
5000
5001  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5002  // do and don't have a cc_out optional-def operand. With some spot-checks
5003  // of the operand list, we can figure out which variant we're trying to
5004  // parse and adjust accordingly before actually matching. We shouldn't ever
5005  // try to remove a cc_out operand that was explicitly set on the
5006  // mnemonic, of course (CarrySetting == true). Reason #317 that the
5007  // table-driven matcher doesn't fit well with the ARM instruction set.
5008  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5009    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5010    Operands.erase(Operands.begin() + 1);
5011    delete Op;
5012  }
5013
5014  // ARM mode 'blx' needs special handling, as the register operand version
5015  // is predicable, but the label operand version is not. So, we can't rely
5016  // on the Mnemonic based checking to correctly figure out when to put
5017  // a k_CondCode operand in the list. If we're trying to match the label
5018  // version, remove the k_CondCode operand here.
5019  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5020      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5021    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5022    Operands.erase(Operands.begin() + 1);
5023    delete Op;
5024  }
5025
5026  // The vector-compare-to-zero instructions have a literal token "#0" at
5027  // the end that comes to here as an immediate operand. Convert it to a
5028  // token to play nicely with the matcher.
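  // e.g. the trailing '#0' in 'vceq.i32 d0, d1, #0'.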
5029  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5030      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5031      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5032    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5033    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5034    if (CE && CE->getValue() == 0) {
5035      Operands.erase(Operands.begin() + 5);
5036      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5037      delete Op;
5038    }
5039  }
5040  // VCMP{E} does the same thing, but with a different operand count.
5041  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5042      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5043    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5044    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5045    if (CE && CE->getValue() == 0) {
5046      Operands.erase(Operands.begin() + 4);
5047      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5048      delete Op;
5049    }
5050  }
5051  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5052  // end. Convert it to a token here. Take care not to convert those
5053  // that should hit the Thumb2 encoding.
5054  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5055      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5056      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5057      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5058    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5059    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5060    if (CE && CE->getValue() == 0 &&
5061        (isThumbOne() ||
5062         // The cc_out operand matches the IT block.
5063         ((inITBlock() != CarrySetting) &&
5064         // Neither register operand is a high register.
5065         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5066          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5067      Operands.erase(Operands.begin() + 5);
5068      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5069      delete Op;
5070    }
5071  }
5072
5073  return false;
5074}
5075
5076// Validate context-sensitive operand constraints.
5077
5078// Return 'true' if the register list contains registers other than low GPRs
5079// (or HiReg, if specified), 'false' otherwise. If Reg is in the register
5080// list, set 'containsReg' to true.
5081static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5082                                 unsigned HiReg, bool &containsReg) {
5083  containsReg = false;
5084  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5085    unsigned OpReg = Inst.getOperand(i).getReg();
5086    if (OpReg == Reg)
5087      containsReg = true;
5088    // Anything other than a low register isn't legal here.
5089    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5090      return true;
5091  }
5092  return false;
5093}
5094
5095// Check if the specified register is in the register list of the instruction,
5096// starting at the indicated operand number.
5097static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5098  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5099    unsigned OpReg = Inst.getOperand(i).getReg();
5100    if (OpReg == Reg)
5101      return true;
5102  }
5103  return false;
5104}
5105
5106// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5107// the ARMInsts array) instead. Getting that here requires awkward
5108// API changes, though. Better way?
5109namespace llvm {
5110extern const MCInstrDesc ARMInsts[];
5111}
5112static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5113  return ARMInsts[Opcode];
5114}
5115
5116// FIXME: We would really like to be able to tablegen'erate this.
5117bool ARMAsmParser::
5118validateInstruction(MCInst &Inst,
5119                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5120  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5121  SMLoc Loc = Operands[0]->getStartLoc();
5122  // Check the IT block state first.
5123  // NOTE: BKPT instruction has the interesting property of being
5124  // allowed in IT blocks, but not being predicable.  It just always
5125  // executes.
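  // For example, an 'addne' following 'it eq' is diagnosed below because its
  // condition does not match the IT block's condition.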
5126  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5127      Inst.getOpcode() != ARM::BKPT) {
5128    unsigned bit = 1;
5129    if (ITState.FirstCond)
5130      ITState.FirstCond = false;
5131    else
5132      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5133    // The instruction must be predicable.
5134    if (!MCID.isPredicable())
5135      return Error(Loc, "instructions in IT block must be predicable");
5136    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5137    unsigned ITCond = bit ? ITState.Cond :
5138      ARMCC::getOppositeCondition(ITState.Cond);
5139    if (Cond != ITCond) {
5140      // Find the condition code Operand to get its SMLoc information.
5141      SMLoc CondLoc;
5142      for (unsigned i = 1; i < Operands.size(); ++i)
5143        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5144          CondLoc = Operands[i]->getStartLoc();
5145      return Error(CondLoc, "incorrect condition in IT block; got '" +
5146                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5147                   "', but expected '" +
5148                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5149    }
5150  // Check for non-'al' condition codes outside of the IT block.
5151  } else if (isThumbTwo() && MCID.isPredicable() &&
5152             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5153             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5154             Inst.getOpcode() != ARM::t2B)
5155    return Error(Loc, "predicated instructions must be in IT block");
5156
5157  switch (Inst.getOpcode()) {
5158  case ARM::LDRD:
5159  case ARM::LDRD_PRE:
5160  case ARM::LDRD_POST:
5161  case ARM::LDREXD: {
5162    // Rt2 must be Rt + 1.
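    // e.g. 'ldrd r0, r1, [r2]' is accepted, but 'ldrd r0, r2, [r3]' is not.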
5163    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5164    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5165    if (Rt2 != Rt + 1)
5166      return Error(Operands[3]->getStartLoc(),
5167                   "destination operands must be sequential");
5168    return false;
5169  }
5170  case ARM::STRD: {
5171    // Rt2 must be Rt + 1.
5172    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5173    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5174    if (Rt2 != Rt + 1)
5175      return Error(Operands[3]->getStartLoc(),
5176                   "source operands must be sequential");
5177    return false;
5178  }
5179  case ARM::STRD_PRE:
5180  case ARM::STRD_POST:
5181  case ARM::STREXD: {
5182    // Rt2 must be Rt + 1.
5183    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5184    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5185    if (Rt2 != Rt + 1)
5186      return Error(Operands[3]->getStartLoc(),
5187                   "source operands must be sequential");
5188    return false;
5189  }
5190  case ARM::SBFX:
5191  case ARM::UBFX: {
5192    // width must be in range [1, 32-lsb]
5193    unsigned lsb = Inst.getOperand(2).getImm();
5194    unsigned widthm1 = Inst.getOperand(3).getImm();
5195    if (widthm1 >= 32 - lsb)
5196      return Error(Operands[5]->getStartLoc(),
5197                   "bitfield width must be in range [1,32-lsb]");
5198    return false;
5199  }
5200  case ARM::tLDMIA: {
5201    // If we're parsing Thumb2, the .w variant is available and handles
5202    // most cases that are normally illegal for a Thumb1 LDM
5203    // instruction. We'll make the transformation in processInstruction()
5204    // if necessary.
5205    //
5206    // Thumb LDM instructions are writeback iff the base register is not
5207    // in the register list.
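    // For example, 'ldmia r0!, {r1, r2}' requires the '!' since r0 is not in
    // the list, while 'ldmia r0, {r0, r1}' must not have one.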
5208    unsigned Rn = Inst.getOperand(0).getReg();
5209    bool hasWritebackToken =
5210      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5211       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5212    bool listContainsBase;
5213    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5214      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5215                   "registers must be in range r0-r7");
5216    // If we should have writeback, then there should be a '!' token.
5217    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5218      return Error(Operands[2]->getStartLoc(),
5219                   "writeback operator '!' expected");
5220    // If we should not have writeback, there must not be a '!'. This is
5221    // true even for the 32-bit wide encodings.
5222    if (listContainsBase && hasWritebackToken)
5223      return Error(Operands[3]->getStartLoc(),
5224                   "writeback operator '!' not allowed when base register "
5225                   "in register list");
5226
5227    break;
5228  }
5229  case ARM::t2LDMIA_UPD: {
5230    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5231      return Error(Operands[4]->getStartLoc(),
5232                   "writeback operator '!' not allowed when base register "
5233                   "in register list");
5234    break;
5235  }
5236  // Like ldm/stm, push and pop have a hi-reg-handling version in Thumb2, so
5237  // only issue a diagnostic for Thumb1. The instructions will be
5238  // switched to the t2 encodings in processInstruction() if necessary.
5239  case ARM::tPOP: {
5240    bool listContainsBase;
5241    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5242        !isThumbTwo())
5243      return Error(Operands[2]->getStartLoc(),
5244                   "registers must be in range r0-r7 or pc");
5245    break;
5246  }
5247  case ARM::tPUSH: {
5248    bool listContainsBase;
5249    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5250        !isThumbTwo())
5251      return Error(Operands[2]->getStartLoc(),
5252                   "registers must be in range r0-r7 or lr");
5253    break;
5254  }
5255  case ARM::tSTMIA_UPD: {
5256    bool listContainsBase;
5257    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5258      return Error(Operands[4]->getStartLoc(),
5259                   "registers must be in range r0-r7");
5260    break;
5261  }
5262  }
5263
5264  return false;
5265}
5266
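// Map the pseudo '*_Asm_*' VST opcodes used during parsing onto the real
// instruction opcodes. Spacing is set to the D-register stride of the register
// list (1 for consecutive D registers, 2 for even/odd spaced lists).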
5267static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5268  switch(Opc) {
5269  default: llvm_unreachable("unexpected opcode!");
5270  // VST1LN
5271  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5272  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5273  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5274  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5275  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5276  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5277  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5278  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5279  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5280
5281  // VST2LN
5282  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5283  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5284  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5285  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5286  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5287
5288  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5289  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5290  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5291  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5292  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5293
5294  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5295  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5296  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5297  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5298  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5299
5300  // VST3LN
5301  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5302  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5303  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5304  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNq16_UPD;
5305  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5306  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5307  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5308  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5309  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5310  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5311  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5312  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5313  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5314  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5315  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5316
5317  // VST3
5318  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5319  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5320  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5321  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5322  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5323  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5324  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5325  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5326  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5327  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5328  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5329  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5330  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5331  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5332  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5333  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5334  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5335  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5336
5337  // VST4LN
5338  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5339  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5340  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5341  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNq16_UPD;
5342  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5343  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5344  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5345  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5346  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5347  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5348  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5349  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5350  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5351  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5352  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5353
5354  // VST4
5355  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5356  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5357  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5358  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5359  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5360  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5361  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5362  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5363  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5364  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5365  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5366  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5367  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5368  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5369  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5370  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5371  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5372  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5373  }
5374}
5375
5376static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5377  switch(Opc) {
5378  default: llvm_unreachable("unexpected opcode!");
5379  // VLD1LN
5380  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5381  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5382  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5383  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5384  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5385  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5386  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5387  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5388  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5389
5390  // VLD2LN
5391  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5392  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5393  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5394  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNq16_UPD;
5395  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5396  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5397  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5398  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5399  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5400  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5401  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5402  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5403  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5404  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5405  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5406
5407  // VLD3DUP
5408  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5409  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5410  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5411  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 1; return ARM::VLD3DUPq8_UPD;
5412  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPq16_UPD;
5413  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5414  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5415  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5416  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5417  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5418  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5419  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5420  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5421  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5422  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5423  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5424  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5425  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5426
5427  // VLD3LN
5428  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5429  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5430  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5431  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNq16_UPD;
5432  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5433  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5434  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5435  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5436  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5437  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5438  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5439  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5440  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5441  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5442  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5443
5444  // VLD3
5445  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5446  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5447  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5448  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5449  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5450  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5451  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5452  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5453  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5454  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5455  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5456  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5457  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5458  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5459  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5460  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5461  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5462  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5463
5464  // VLD4LN
5465  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5466  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5467  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5468  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5469  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5470  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5471  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5472  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5473  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5474  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5475  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5476  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5477  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5478  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5479  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5480
5481  // VLD4DUP
5482  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5483  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5484  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5485  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5486  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5487  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5488  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5489  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5490  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5491  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5492  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5493  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5494  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5495  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5496  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5497  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5498  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5499  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5500
5501  // VLD4
5502  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5503  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5504  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5505  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5506  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5507  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5508  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5509  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5510  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5511  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5512  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5513  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5514  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5515  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5516  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5517  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5518  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5519  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5520  }
5521}
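// Note on the Spacing value that getRealVSTOpcode and getRealVLDOpcode hand
// back: it is 1 for the consecutive-register list forms (the 'd' pseudos,
// e.g. {d0, d1, d2}) and 2 for the every-other-register forms (the 'q'
// pseudos, e.g. {d0, d2, d4}). processInstruction below uses it to
// materialize the second, third and fourth registers of the list.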
5522
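// Rewrite pseudo/alias MCInsts produced by the matcher into the real
// instructions they stand for. Each case below builds a TmpInst with the
// operands reordered (and any implied operands synthesized), assigns it to
// Inst, and returns true to signal that Inst was rewritten, presumably so a
// caller can re-run this until no further rewrites apply and transformations
// can chain.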
5523bool ARMAsmParser::
5524processInstruction(MCInst &Inst,
5525                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5526  switch (Inst.getOpcode()) {
5527  // Aliases for alternate PC+imm syntax of LDR instructions.
5528  case ARM::t2LDRpcrel:
5529    Inst.setOpcode(ARM::t2LDRpci);
5530    return true;
5531  case ARM::t2LDRBpcrel:
5532    Inst.setOpcode(ARM::t2LDRBpci);
5533    return true;
5534  case ARM::t2LDRHpcrel:
5535    Inst.setOpcode(ARM::t2LDRHpci);
5536    return true;
5537  case ARM::t2LDRSBpcrel:
5538    Inst.setOpcode(ARM::t2LDRSBpci);
5539    return true;
5540  case ARM::t2LDRSHpcrel:
5541    Inst.setOpcode(ARM::t2LDRSHpci);
5542    return true;
5543  // Handle NEON VST complex aliases.
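  // The parsed VSTnLN pseudos keep the operands roughly in assembly order:
  // 0 = first list register, 1 = lane index, 2 = Rn, 3 = alignment, then
  // (for the '_register' writeback forms) 4 = Rm, followed by the predicate.
  // The real instructions want the writeback/base/alignment/Rm operands
  // first, so for an input along the lines of "vst1.32 {d4[1]}, [r1], r5"
  // (illustrative) the addOperand calls below re-emit those operands in the
  // order the encoder expects.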
5544  case ARM::VST1LNdWB_register_Asm_8:
5545  case ARM::VST1LNdWB_register_Asm_16:
5546  case ARM::VST1LNdWB_register_Asm_32: {
5547    MCInst TmpInst;
5548    // Shuffle the operands around so the lane index operand is in the
5549    // right place.
5550    unsigned Spacing;
5551    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5552    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5553    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5554    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5555    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5556    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5557    TmpInst.addOperand(Inst.getOperand(1)); // lane
5558    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5559    TmpInst.addOperand(Inst.getOperand(6));
5560    Inst = TmpInst;
5561    return true;
5562  }
5563
5564  case ARM::VST2LNdWB_register_Asm_8:
5565  case ARM::VST2LNdWB_register_Asm_16:
5566  case ARM::VST2LNdWB_register_Asm_32:
5567  case ARM::VST2LNqWB_register_Asm_16:
5568  case ARM::VST2LNqWB_register_Asm_32: {
5569    MCInst TmpInst;
5570    // Shuffle the operands around so the lane index operand is in the
5571    // right place.
5572    unsigned Spacing;
5573    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5574    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5575    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5576    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5577    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5578    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5579    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5580                                            Spacing));
5581    TmpInst.addOperand(Inst.getOperand(1)); // lane
5582    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5583    TmpInst.addOperand(Inst.getOperand(6));
5584    Inst = TmpInst;
5585    return true;
5586  }
5587
5588  case ARM::VST3LNdWB_register_Asm_8:
5589  case ARM::VST3LNdWB_register_Asm_16:
5590  case ARM::VST3LNdWB_register_Asm_32:
5591  case ARM::VST3LNqWB_register_Asm_16:
5592  case ARM::VST3LNqWB_register_Asm_32: {
5593    MCInst TmpInst;
5594    // Shuffle the operands around so the lane index operand is in the
5595    // right place.
5596    unsigned Spacing;
5597    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5598    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5599    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5600    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5601    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5602    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5603    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5604                                            Spacing));
5605    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5606                                            Spacing * 2));
5607    TmpInst.addOperand(Inst.getOperand(1)); // lane
5608    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5609    TmpInst.addOperand(Inst.getOperand(6));
5610    Inst = TmpInst;
5611    return true;
5612  }
5613
5614  case ARM::VST4LNdWB_register_Asm_8:
5615  case ARM::VST4LNdWB_register_Asm_16:
5616  case ARM::VST4LNdWB_register_Asm_32:
5617  case ARM::VST4LNqWB_register_Asm_16:
5618  case ARM::VST4LNqWB_register_Asm_32: {
5619    MCInst TmpInst;
5620    // Shuffle the operands around so the lane index operand is in the
5621    // right place.
5622    unsigned Spacing;
5623    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5624    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5625    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5626    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5627    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5628    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5629    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5630                                            Spacing));
5631    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5632                                            Spacing * 2));
5633    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5634                                            Spacing * 3));
5635    TmpInst.addOperand(Inst.getOperand(1)); // lane
5636    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5637    TmpInst.addOperand(Inst.getOperand(6));
5638    Inst = TmpInst;
5639    return true;
5640  }
5641
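  // The '_fixed' writeback forms ("[rN]!", post-increment by the transfer
  // size) have no index register, so a zero register is placed in the Rm
  // slot of the real instruction; the '_register' forms above pass the
  // parsed Rm through instead.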
5642  case ARM::VST1LNdWB_fixed_Asm_8:
5643  case ARM::VST1LNdWB_fixed_Asm_16:
5644  case ARM::VST1LNdWB_fixed_Asm_32: {
5645    MCInst TmpInst;
5646    // Shuffle the operands around so the lane index operand is in the
5647    // right place.
5648    unsigned Spacing;
5649    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5650    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5651    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5652    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5653    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5654    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5655    TmpInst.addOperand(Inst.getOperand(1)); // lane
5656    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5657    TmpInst.addOperand(Inst.getOperand(5));
5658    Inst = TmpInst;
5659    return true;
5660  }
5661
5662  case ARM::VST2LNdWB_fixed_Asm_8:
5663  case ARM::VST2LNdWB_fixed_Asm_16:
5664  case ARM::VST2LNdWB_fixed_Asm_32:
5665  case ARM::VST2LNqWB_fixed_Asm_16:
5666  case ARM::VST2LNqWB_fixed_Asm_32: {
5667    MCInst TmpInst;
5668    // Shuffle the operands around so the lane index operand is in the
5669    // right place.
5670    unsigned Spacing;
5671    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5672    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5673    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5674    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5675    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5676    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5677    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5678                                            Spacing));
5679    TmpInst.addOperand(Inst.getOperand(1)); // lane
5680    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5681    TmpInst.addOperand(Inst.getOperand(5));
5682    Inst = TmpInst;
5683    return true;
5684  }
5685
5686  case ARM::VST3LNdWB_fixed_Asm_8:
5687  case ARM::VST3LNdWB_fixed_Asm_16:
5688  case ARM::VST3LNdWB_fixed_Asm_32:
5689  case ARM::VST3LNqWB_fixed_Asm_16:
5690  case ARM::VST3LNqWB_fixed_Asm_32: {
5691    MCInst TmpInst;
5692    // Shuffle the operands around so the lane index operand is in the
5693    // right place.
5694    unsigned Spacing;
5695    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5696    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5697    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5698    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5699    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5700    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5701    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5702                                            Spacing));
5703    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5704                                            Spacing * 2));
5705    TmpInst.addOperand(Inst.getOperand(1)); // lane
5706    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5707    TmpInst.addOperand(Inst.getOperand(5));
5708    Inst = TmpInst;
5709    return true;
5710  }
5711
5712  case ARM::VST4LNdWB_fixed_Asm_8:
5713  case ARM::VST4LNdWB_fixed_Asm_16:
5714  case ARM::VST4LNdWB_fixed_Asm_32:
5715  case ARM::VST4LNqWB_fixed_Asm_16:
5716  case ARM::VST4LNqWB_fixed_Asm_32: {
5717    MCInst TmpInst;
5718    // Shuffle the operands around so the lane index operand is in the
5719    // right place.
5720    unsigned Spacing;
5721    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5722    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5723    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5724    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5725    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5726    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5727    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5728                                            Spacing));
5729    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5730                                            Spacing * 2));
5731    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5732                                            Spacing * 3));
5733    TmpInst.addOperand(Inst.getOperand(1)); // lane
5734    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5735    TmpInst.addOperand(Inst.getOperand(5));
5736    Inst = TmpInst;
5737    return true;
5738  }
5739
5740  case ARM::VST1LNdAsm_8:
5741  case ARM::VST1LNdAsm_16:
5742  case ARM::VST1LNdAsm_32: {
5743    MCInst TmpInst;
5744    // Shuffle the operands around so the lane index operand is in the
5745    // right place.
5746    unsigned Spacing;
5747    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5748    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5749    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5750    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5751    TmpInst.addOperand(Inst.getOperand(1)); // lane
5752    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5753    TmpInst.addOperand(Inst.getOperand(5));
5754    Inst = TmpInst;
5755    return true;
5756  }
5757
5758  case ARM::VST2LNdAsm_8:
5759  case ARM::VST2LNdAsm_16:
5760  case ARM::VST2LNdAsm_32:
5761  case ARM::VST2LNqAsm_16:
5762  case ARM::VST2LNqAsm_32: {
5763    MCInst TmpInst;
5764    // Shuffle the operands around so the lane index operand is in the
5765    // right place.
5766    unsigned Spacing;
5767    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5768    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5769    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5770    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5771    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5772                                            Spacing));
5773    TmpInst.addOperand(Inst.getOperand(1)); // lane
5774    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5775    TmpInst.addOperand(Inst.getOperand(5));
5776    Inst = TmpInst;
5777    return true;
5778  }
5779
5780  case ARM::VST3LNdAsm_8:
5781  case ARM::VST3LNdAsm_16:
5782  case ARM::VST3LNdAsm_32:
5783  case ARM::VST3LNqAsm_16:
5784  case ARM::VST3LNqAsm_32: {
5785    MCInst TmpInst;
5786    // Shuffle the operands around so the lane index operand is in the
5787    // right place.
5788    unsigned Spacing;
5789    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5790    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5791    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5792    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5793    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5794                                            Spacing));
5795    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5796                                            Spacing * 2));
5797    TmpInst.addOperand(Inst.getOperand(1)); // lane
5798    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5799    TmpInst.addOperand(Inst.getOperand(5));
5800    Inst = TmpInst;
5801    return true;
5802  }
5803
5804  case ARM::VST4LNdAsm_8:
5805  case ARM::VST4LNdAsm_16:
5806  case ARM::VST4LNdAsm_32:
5807  case ARM::VST4LNqAsm_16:
5808  case ARM::VST4LNqAsm_32: {
5809    MCInst TmpInst;
5810    // Shuffle the operands around so the lane index operand is in the
5811    // right place.
5812    unsigned Spacing;
5813    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5814    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5815    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5816    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5817    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5818                                            Spacing));
5819    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5820                                            Spacing * 2));
5821    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5822                                            Spacing * 3));
5823    TmpInst.addOperand(Inst.getOperand(1)); // lane
5824    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5825    TmpInst.addOperand(Inst.getOperand(5));
5826    Inst = TmpInst;
5827    return true;
5828  }
5829
5830  // Handle NEON VLD complex aliases.
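  // A lane load only replaces a single element, so in addition to the
  // destination list the real VLDnLN instructions take the same registers
  // again as tied source operands (the "Tied operand src" entries below),
  // keeping the untouched lanes intact.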
5831  case ARM::VLD1LNdWB_register_Asm_8:
5832  case ARM::VLD1LNdWB_register_Asm_16:
5833  case ARM::VLD1LNdWB_register_Asm_32: {
5834    MCInst TmpInst;
5835    // Shuffle the operands around so the lane index operand is in the
5836    // right place.
5837    unsigned Spacing;
5838    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5839    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5840    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5841    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5842    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5843    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5844    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5845    TmpInst.addOperand(Inst.getOperand(1)); // lane
5846    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5847    TmpInst.addOperand(Inst.getOperand(6));
5848    Inst = TmpInst;
5849    return true;
5850  }
5851
5852  case ARM::VLD2LNdWB_register_Asm_8:
5853  case ARM::VLD2LNdWB_register_Asm_16:
5854  case ARM::VLD2LNdWB_register_Asm_32:
5855  case ARM::VLD2LNqWB_register_Asm_16:
5856  case ARM::VLD2LNqWB_register_Asm_32: {
5857    MCInst TmpInst;
5858    // Shuffle the operands around so the lane index operand is in the
5859    // right place.
5860    unsigned Spacing;
5861    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5862    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5863    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5864                                            Spacing));
5865    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5866    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5867    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5868    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5869    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5870    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5871                                            Spacing));
5872    TmpInst.addOperand(Inst.getOperand(1)); // lane
5873    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5874    TmpInst.addOperand(Inst.getOperand(6));
5875    Inst = TmpInst;
5876    return true;
5877  }
5878
5879  case ARM::VLD3LNdWB_register_Asm_8:
5880  case ARM::VLD3LNdWB_register_Asm_16:
5881  case ARM::VLD3LNdWB_register_Asm_32:
5882  case ARM::VLD3LNqWB_register_Asm_16:
5883  case ARM::VLD3LNqWB_register_Asm_32: {
5884    MCInst TmpInst;
5885    // Shuffle the operands around so the lane index operand is in the
5886    // right place.
5887    unsigned Spacing;
5888    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5889    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5890    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5891                                            Spacing));
5892    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5893                                            Spacing * 2));
5894    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5895    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5896    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5897    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5898    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5899    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5900                                            Spacing));
5901    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5902                                            Spacing * 2));
5903    TmpInst.addOperand(Inst.getOperand(1)); // lane
5904    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5905    TmpInst.addOperand(Inst.getOperand(6));
5906    Inst = TmpInst;
5907    return true;
5908  }
5909
5910  case ARM::VLD4LNdWB_register_Asm_8:
5911  case ARM::VLD4LNdWB_register_Asm_16:
5912  case ARM::VLD4LNdWB_register_Asm_32:
5913  case ARM::VLD4LNqWB_register_Asm_16:
5914  case ARM::VLD4LNqWB_register_Asm_32: {
5915    MCInst TmpInst;
5916    // Shuffle the operands around so the lane index operand is in the
5917    // right place.
5918    unsigned Spacing;
5919    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5920    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5921    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5922                                            Spacing));
5923    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5924                                            Spacing * 2));
5925    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5926                                            Spacing * 3));
5927    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5928    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5929    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5930    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5931    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5932    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5933                                            Spacing));
5934    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5935                                            Spacing * 2));
5936    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5937                                            Spacing * 3));
5938    TmpInst.addOperand(Inst.getOperand(1)); // lane
5939    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5940    TmpInst.addOperand(Inst.getOperand(6));
5941    Inst = TmpInst;
5942    return true;
5943  }
5944
5945  case ARM::VLD1LNdWB_fixed_Asm_8:
5946  case ARM::VLD1LNdWB_fixed_Asm_16:
5947  case ARM::VLD1LNdWB_fixed_Asm_32: {
5948    MCInst TmpInst;
5949    // Shuffle the operands around so the lane index operand is in the
5950    // right place.
5951    unsigned Spacing;
5952    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5953    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5954    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5955    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5956    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5957    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5958    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5959    TmpInst.addOperand(Inst.getOperand(1)); // lane
5960    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5961    TmpInst.addOperand(Inst.getOperand(5));
5962    Inst = TmpInst;
5963    return true;
5964  }
5965
5966  case ARM::VLD2LNdWB_fixed_Asm_8:
5967  case ARM::VLD2LNdWB_fixed_Asm_16:
5968  case ARM::VLD2LNdWB_fixed_Asm_32:
5969  case ARM::VLD2LNqWB_fixed_Asm_16:
5970  case ARM::VLD2LNqWB_fixed_Asm_32: {
5971    MCInst TmpInst;
5972    // Shuffle the operands around so the lane index operand is in the
5973    // right place.
5974    unsigned Spacing;
5975    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5976    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5977    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5978                                            Spacing));
5979    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5980    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5981    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5982    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5983    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5984    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5985                                            Spacing));
5986    TmpInst.addOperand(Inst.getOperand(1)); // lane
5987    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5988    TmpInst.addOperand(Inst.getOperand(5));
5989    Inst = TmpInst;
5990    return true;
5991  }
5992
5993  case ARM::VLD3LNdWB_fixed_Asm_8:
5994  case ARM::VLD3LNdWB_fixed_Asm_16:
5995  case ARM::VLD3LNdWB_fixed_Asm_32:
5996  case ARM::VLD3LNqWB_fixed_Asm_16:
5997  case ARM::VLD3LNqWB_fixed_Asm_32: {
5998    MCInst TmpInst;
5999    // Shuffle the operands around so the lane index operand is in the
6000    // right place.
6001    unsigned Spacing;
6002    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6003    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6004    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6005                                            Spacing));
6006    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6007                                            Spacing * 2));
6008    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6009    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6010    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6011    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6012    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6013    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6014                                            Spacing));
6015    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6016                                            Spacing * 2));
6017    TmpInst.addOperand(Inst.getOperand(1)); // lane
6018    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6019    TmpInst.addOperand(Inst.getOperand(5));
6020    Inst = TmpInst;
6021    return true;
6022  }
6023
6024  case ARM::VLD4LNdWB_fixed_Asm_8:
6025  case ARM::VLD4LNdWB_fixed_Asm_16:
6026  case ARM::VLD4LNdWB_fixed_Asm_32:
6027  case ARM::VLD4LNqWB_fixed_Asm_16:
6028  case ARM::VLD4LNqWB_fixed_Asm_32: {
6029    MCInst TmpInst;
6030    // Shuffle the operands around so the lane index operand is in the
6031    // right place.
6032    unsigned Spacing;
6033    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6034    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6035    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6036                                            Spacing));
6037    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6038                                            Spacing * 2));
6039    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6040                                            Spacing * 3));
6041    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6042    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6043    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6044    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6045    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6046    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6047                                            Spacing));
6048    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6049                                            Spacing * 2));
6050    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6051                                            Spacing * 3));
6052    TmpInst.addOperand(Inst.getOperand(1)); // lane
6053    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6054    TmpInst.addOperand(Inst.getOperand(5));
6055    Inst = TmpInst;
6056    return true;
6057  }
6058
6059  case ARM::VLD1LNdAsm_8:
6060  case ARM::VLD1LNdAsm_16:
6061  case ARM::VLD1LNdAsm_32: {
6062    MCInst TmpInst;
6063    // Shuffle the operands around so the lane index operand is in the
6064    // right place.
6065    unsigned Spacing;
6066    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6067    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6068    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6069    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6070    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6071    TmpInst.addOperand(Inst.getOperand(1)); // lane
6072    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6073    TmpInst.addOperand(Inst.getOperand(5));
6074    Inst = TmpInst;
6075    return true;
6076  }
6077
6078  case ARM::VLD2LNdAsm_8:
6079  case ARM::VLD2LNdAsm_16:
6080  case ARM::VLD2LNdAsm_32:
6081  case ARM::VLD2LNqAsm_16:
6082  case ARM::VLD2LNqAsm_32: {
6083    MCInst TmpInst;
6084    // Shuffle the operands around so the lane index operand is in the
6085    // right place.
6086    unsigned Spacing;
6087    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6088    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6089    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6090                                            Spacing));
6091    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6092    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6093    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6094    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6095                                            Spacing));
6096    TmpInst.addOperand(Inst.getOperand(1)); // lane
6097    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6098    TmpInst.addOperand(Inst.getOperand(5));
6099    Inst = TmpInst;
6100    return true;
6101  }
6102
6103  case ARM::VLD3LNdAsm_8:
6104  case ARM::VLD3LNdAsm_16:
6105  case ARM::VLD3LNdAsm_32:
6106  case ARM::VLD3LNqAsm_16:
6107  case ARM::VLD3LNqAsm_32: {
6108    MCInst TmpInst;
6109    // Shuffle the operands around so the lane index operand is in the
6110    // right place.
6111    unsigned Spacing;
6112    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6113    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6114    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6115                                            Spacing));
6116    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6117                                            Spacing * 2));
6118    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6119    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6120    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6121    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6122                                            Spacing));
6123    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6124                                            Spacing * 2));
6125    TmpInst.addOperand(Inst.getOperand(1)); // lane
6126    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6127    TmpInst.addOperand(Inst.getOperand(5));
6128    Inst = TmpInst;
6129    return true;
6130  }
6131
6132  case ARM::VLD4LNdAsm_8:
6133  case ARM::VLD4LNdAsm_16:
6134  case ARM::VLD4LNdAsm_32:
6135  case ARM::VLD4LNqAsm_16:
6136  case ARM::VLD4LNqAsm_32: {
6137    MCInst TmpInst;
6138    // Shuffle the operands around so the lane index operand is in the
6139    // right place.
6140    unsigned Spacing;
6141    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6142    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6143    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6144                                            Spacing));
6145    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6146                                            Spacing * 2));
6147    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6148                                            Spacing * 3));
6149    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6150    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6151    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6152    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6153                                            Spacing));
6154    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6155                                            Spacing * 2));
6156    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6157                                            Spacing * 3));
6158    TmpInst.addOperand(Inst.getOperand(1)); // lane
6159    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6160    TmpInst.addOperand(Inst.getOperand(5));
6161    Inst = TmpInst;
6162    return true;
6163  }
6164
6165  // VLD3DUP single 3-element structure to all lanes instructions.
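  // Only the first register of the list is recorded by the parsed pseudo;
  // the remaining list registers are synthesized by adding Spacing (and
  // multiples of it) to its value, which assumes the D registers are
  // numbered consecutively in the generated register enum.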
6166  case ARM::VLD3DUPdAsm_8:
6167  case ARM::VLD3DUPdAsm_16:
6168  case ARM::VLD3DUPdAsm_32:
6169  case ARM::VLD3DUPqAsm_8:
6170  case ARM::VLD3DUPqAsm_16:
6171  case ARM::VLD3DUPqAsm_32: {
6172    MCInst TmpInst;
6173    unsigned Spacing;
6174    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6175    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6176    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6177                                            Spacing));
6178    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6179                                            Spacing * 2));
6180    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6181    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6182    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6183    TmpInst.addOperand(Inst.getOperand(4));
6184    Inst = TmpInst;
6185    return true;
6186  }
6187
6188  case ARM::VLD3DUPdWB_fixed_Asm_8:
6189  case ARM::VLD3DUPdWB_fixed_Asm_16:
6190  case ARM::VLD3DUPdWB_fixed_Asm_32:
6191  case ARM::VLD3DUPqWB_fixed_Asm_8:
6192  case ARM::VLD3DUPqWB_fixed_Asm_16:
6193  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6194    MCInst TmpInst;
6195    unsigned Spacing;
6196    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6197    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6198    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6199                                            Spacing));
6200    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6201                                            Spacing * 2));
6202    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6203    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6204    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6205    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6206    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6207    TmpInst.addOperand(Inst.getOperand(4));
6208    Inst = TmpInst;
6209    return true;
6210  }
6211
6212  case ARM::VLD3DUPdWB_register_Asm_8:
6213  case ARM::VLD3DUPdWB_register_Asm_16:
6214  case ARM::VLD3DUPdWB_register_Asm_32:
6215  case ARM::VLD3DUPqWB_register_Asm_8:
6216  case ARM::VLD3DUPqWB_register_Asm_16:
6217  case ARM::VLD3DUPqWB_register_Asm_32: {
6218    MCInst TmpInst;
6219    unsigned Spacing;
6220    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6221    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6222    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6223                                            Spacing));
6224    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6225                                            Spacing * 2));
6226    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6227    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6228    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6229    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6230    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6231    TmpInst.addOperand(Inst.getOperand(5));
6232    Inst = TmpInst;
6233    return true;
6234  }
6235
6236  // VLD3 multiple 3-element structure instructions.
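  // These whole-structure loads (like the all-lanes forms above) overwrite
  // every element of the destination registers, so unlike the lane forms no
  // tied source operands are needed; only the register list, base,
  // alignment, optional Rm and predicate are emitted.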
6237  case ARM::VLD3dAsm_8:
6238  case ARM::VLD3dAsm_16:
6239  case ARM::VLD3dAsm_32:
6240  case ARM::VLD3qAsm_8:
6241  case ARM::VLD3qAsm_16:
6242  case ARM::VLD3qAsm_32: {
6243    MCInst TmpInst;
6244    unsigned Spacing;
6245    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6246    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6247    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6248                                            Spacing));
6249    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6250                                            Spacing * 2));
6251    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6252    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6253    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6254    TmpInst.addOperand(Inst.getOperand(4));
6255    Inst = TmpInst;
6256    return true;
6257  }
6258
6259  case ARM::VLD3dWB_fixed_Asm_8:
6260  case ARM::VLD3dWB_fixed_Asm_16:
6261  case ARM::VLD3dWB_fixed_Asm_32:
6262  case ARM::VLD3qWB_fixed_Asm_8:
6263  case ARM::VLD3qWB_fixed_Asm_16:
6264  case ARM::VLD3qWB_fixed_Asm_32: {
6265    MCInst TmpInst;
6266    unsigned Spacing;
6267    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6268    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6269    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6270                                            Spacing));
6271    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6272                                            Spacing * 2));
6273    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6274    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6275    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6276    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6277    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6278    TmpInst.addOperand(Inst.getOperand(4));
6279    Inst = TmpInst;
6280    return true;
6281  }
6282
6283  case ARM::VLD3dWB_register_Asm_8:
6284  case ARM::VLD3dWB_register_Asm_16:
6285  case ARM::VLD3dWB_register_Asm_32:
6286  case ARM::VLD3qWB_register_Asm_8:
6287  case ARM::VLD3qWB_register_Asm_16:
6288  case ARM::VLD3qWB_register_Asm_32: {
6289    MCInst TmpInst;
6290    unsigned Spacing;
6291    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6292    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6293    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6294                                            Spacing));
6295    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6296                                            Spacing * 2));
6297    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6298    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6299    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6300    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6301    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6302    TmpInst.addOperand(Inst.getOperand(5));
6303    Inst = TmpInst;
6304    return true;
6305  }
6306
6307  // VLD4DUP single 4-element structure to all lanes instructions.
6308  case ARM::VLD4DUPdAsm_8:
6309  case ARM::VLD4DUPdAsm_16:
6310  case ARM::VLD4DUPdAsm_32:
6311  case ARM::VLD4DUPqAsm_8:
6312  case ARM::VLD4DUPqAsm_16:
6313  case ARM::VLD4DUPqAsm_32: {
6314    MCInst TmpInst;
6315    unsigned Spacing;
6316    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6317    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6318    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6319                                            Spacing));
6320    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6321                                            Spacing * 2));
6322    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6323                                            Spacing * 3));
6324    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6325    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6326    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6327    TmpInst.addOperand(Inst.getOperand(4));
6328    Inst = TmpInst;
6329    return true;
6330  }
6331
6332  case ARM::VLD4DUPdWB_fixed_Asm_8:
6333  case ARM::VLD4DUPdWB_fixed_Asm_16:
6334  case ARM::VLD4DUPdWB_fixed_Asm_32:
6335  case ARM::VLD4DUPqWB_fixed_Asm_8:
6336  case ARM::VLD4DUPqWB_fixed_Asm_16:
6337  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6338    MCInst TmpInst;
6339    unsigned Spacing;
6340    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6341    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6342    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6343                                            Spacing));
6344    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6345                                            Spacing * 2));
6346    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6347                                            Spacing * 3));
6348    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6349    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6350    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6351    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6352    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6353    TmpInst.addOperand(Inst.getOperand(4));
6354    Inst = TmpInst;
6355    return true;
6356  }
6357
6358  case ARM::VLD4DUPdWB_register_Asm_8:
6359  case ARM::VLD4DUPdWB_register_Asm_16:
6360  case ARM::VLD4DUPdWB_register_Asm_32:
6361  case ARM::VLD4DUPqWB_register_Asm_8:
6362  case ARM::VLD4DUPqWB_register_Asm_16:
6363  case ARM::VLD4DUPqWB_register_Asm_32: {
6364    MCInst TmpInst;
6365    unsigned Spacing;
6366    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6367    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6368    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6369                                            Spacing));
6370    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6371                                            Spacing * 2));
6372    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6373                                            Spacing * 3));
6374    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6375    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6376    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6377    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6378    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6379    TmpInst.addOperand(Inst.getOperand(5));
6380    Inst = TmpInst;
6381    return true;
6382  }
6383
6384  // VLD4 multiple 4-element structure instructions.
6385  case ARM::VLD4dAsm_8:
6386  case ARM::VLD4dAsm_16:
6387  case ARM::VLD4dAsm_32:
6388  case ARM::VLD4qAsm_8:
6389  case ARM::VLD4qAsm_16:
6390  case ARM::VLD4qAsm_32: {
6391    MCInst TmpInst;
6392    unsigned Spacing;
6393    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6394    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6395    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6396                                            Spacing));
6397    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6398                                            Spacing * 2));
6399    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6400                                            Spacing * 3));
6401    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6402    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6403    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6404    TmpInst.addOperand(Inst.getOperand(4));
6405    Inst = TmpInst;
6406    return true;
6407  }
6408
6409  case ARM::VLD4dWB_fixed_Asm_8:
6410  case ARM::VLD4dWB_fixed_Asm_16:
6411  case ARM::VLD4dWB_fixed_Asm_32:
6412  case ARM::VLD4qWB_fixed_Asm_8:
6413  case ARM::VLD4qWB_fixed_Asm_16:
6414  case ARM::VLD4qWB_fixed_Asm_32: {
6415    MCInst TmpInst;
6416    unsigned Spacing;
6417    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6418    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6419    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6420                                            Spacing));
6421    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6422                                            Spacing * 2));
6423    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6424                                            Spacing * 3));
6425    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6426    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6427    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6428    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6429    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6430    TmpInst.addOperand(Inst.getOperand(4));
6431    Inst = TmpInst;
6432    return true;
6433  }
6434
6435  case ARM::VLD4dWB_register_Asm_8:
6436  case ARM::VLD4dWB_register_Asm_16:
6437  case ARM::VLD4dWB_register_Asm_32:
6438  case ARM::VLD4qWB_register_Asm_8:
6439  case ARM::VLD4qWB_register_Asm_16:
6440  case ARM::VLD4qWB_register_Asm_32: {
6441    MCInst TmpInst;
6442    unsigned Spacing;
6443    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6444    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6445    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6446                                            Spacing));
6447    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6448                                            Spacing * 2));
6449    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6450                                            Spacing * 3));
6451    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6452    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6453    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6454    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6455    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6456    TmpInst.addOperand(Inst.getOperand(5));
6457    Inst = TmpInst;
6458    return true;
6459  }
6460
6461  // VST3 multiple 3-element structure instructions.
6462  case ARM::VST3dAsm_8:
6463  case ARM::VST3dAsm_16:
6464  case ARM::VST3dAsm_32:
6465  case ARM::VST3qAsm_8:
6466  case ARM::VST3qAsm_16:
6467  case ARM::VST3qAsm_32: {
6468    MCInst TmpInst;
6469    unsigned Spacing;
6470    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6471    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6472    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6473    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6474    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6475                                            Spacing));
6476    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6477                                            Spacing * 2));
6478    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6479    TmpInst.addOperand(Inst.getOperand(4));
6480    Inst = TmpInst;
6481    return true;
6482  }
6483
6484  case ARM::VST3dWB_fixed_Asm_8:
6485  case ARM::VST3dWB_fixed_Asm_16:
6486  case ARM::VST3dWB_fixed_Asm_32:
6487  case ARM::VST3qWB_fixed_Asm_8:
6488  case ARM::VST3qWB_fixed_Asm_16:
6489  case ARM::VST3qWB_fixed_Asm_32: {
6490    MCInst TmpInst;
6491    unsigned Spacing;
6492    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6493    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6494    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6495    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6496    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6497    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6498    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6499                                            Spacing));
6500    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6501                                            Spacing * 2));
6502    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6503    TmpInst.addOperand(Inst.getOperand(4));
6504    Inst = TmpInst;
6505    return true;
6506  }
6507
6508  case ARM::VST3dWB_register_Asm_8:
6509  case ARM::VST3dWB_register_Asm_16:
6510  case ARM::VST3dWB_register_Asm_32:
6511  case ARM::VST3qWB_register_Asm_8:
6512  case ARM::VST3qWB_register_Asm_16:
6513  case ARM::VST3qWB_register_Asm_32: {
6514    MCInst TmpInst;
6515    unsigned Spacing;
6516    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6517    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6518    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6519    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6520    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6521    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6522    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6523                                            Spacing));
6524    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6525                                            Spacing * 2));
6526    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6527    TmpInst.addOperand(Inst.getOperand(5));
6528    Inst = TmpInst;
6529    return true;
6530  }
6531
6532  // VST4 multiple 4-element structure instructions.
6533  case ARM::VST4dAsm_8:
6534  case ARM::VST4dAsm_16:
6535  case ARM::VST4dAsm_32:
6536  case ARM::VST4qAsm_8:
6537  case ARM::VST4qAsm_16:
6538  case ARM::VST4qAsm_32: {
6539    MCInst TmpInst;
6540    unsigned Spacing;
6541    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6542    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6543    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6544    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6545    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6546                                            Spacing));
6547    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6548                                            Spacing * 2));
6549    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6550                                            Spacing * 3));
6551    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6552    TmpInst.addOperand(Inst.getOperand(4));
6553    Inst = TmpInst;
6554    return true;
6555  }
6556
6557  case ARM::VST4dWB_fixed_Asm_8:
6558  case ARM::VST4dWB_fixed_Asm_16:
6559  case ARM::VST4dWB_fixed_Asm_32:
6560  case ARM::VST4qWB_fixed_Asm_8:
6561  case ARM::VST4qWB_fixed_Asm_16:
6562  case ARM::VST4qWB_fixed_Asm_32: {
6563    MCInst TmpInst;
6564    unsigned Spacing;
6565    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6566    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6567    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6568    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6569    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6570    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6571    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6572                                            Spacing));
6573    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6574                                            Spacing * 2));
6575    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6576                                            Spacing * 3));
6577    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6578    TmpInst.addOperand(Inst.getOperand(4));
6579    Inst = TmpInst;
6580    return true;
6581  }
6582
6583  case ARM::VST4dWB_register_Asm_8:
6584  case ARM::VST4dWB_register_Asm_16:
6585  case ARM::VST4dWB_register_Asm_32:
6586  case ARM::VST4qWB_register_Asm_8:
6587  case ARM::VST4qWB_register_Asm_16:
6588  case ARM::VST4qWB_register_Asm_32: {
6589    MCInst TmpInst;
6590    unsigned Spacing;
6591    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6592    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6593    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6594    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6595    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6596    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6597    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6598                                            Spacing));
6599    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6600                                            Spacing * 2));
6601    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6602                                            Spacing * 3));
6603    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6604    TmpInst.addOperand(Inst.getOperand(5));
6605    Inst = TmpInst;
6606    return true;
6607  }
6608
6609  // Handle the Thumb2 mode MOV complex aliases.
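  // The 16-bit Thumb shift-by-register encodings tie Rd to Rn, require low
  // registers, and set flags exactly when used outside an IT block. The
  // isNarrow checks below mirror that: the non-flag-setting alias (t2MOVsr)
  // can only shrink inside an IT block, the flag-setting one (t2MOVSsr) only
  // outside it; otherwise the 32-bit t2 shift encoding is used.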
6610  case ARM::t2MOVsr:
6611  case ARM::t2MOVSsr: {
6612    // Which instruction to expand to depends on the CCOut operand and,
6613    // when the register operands are low registers, on whether we're in
6614    // an IT block.
6615    bool isNarrow = false;
6616    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6617        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6618        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6619        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6620        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6621      isNarrow = true;
6622    MCInst TmpInst;
6623    unsigned newOpc;
6624    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6625    default: llvm_unreachable("unexpected opcode!");
6626    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6627    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6628    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6629    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6630    }
6631    TmpInst.setOpcode(newOpc);
6632    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6633    if (isNarrow)
6634      TmpInst.addOperand(MCOperand::CreateReg(
6635          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6636    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6637    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6638    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6639    TmpInst.addOperand(Inst.getOperand(5));
6640    if (!isNarrow)
6641      TmpInst.addOperand(MCOperand::CreateReg(
6642          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6643    Inst = TmpInst;
6644    return true;
6645  }
6646  case ARM::t2MOVsi:
6647  case ARM::t2MOVSsi: {
6648    // Which instruction to expand to depends on the CCOut operand and,
6649    // when the register operands are low registers, on whether we're in
6650    // an IT block.
6651    bool isNarrow = false;
6652    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6653        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6654        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6655      isNarrow = true;
6656    MCInst TmpInst;
6657    unsigned newOpc;
6658    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6659    default: llvm_unreachable("unexpected opcode!");
6660    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6661    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6662    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6663    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6664    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6665    }
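    // In these encodings an ASR/LSR shift of 32 is represented by an
    // immediate of 0, so normalize the parsed amount here.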
6666    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6667    if (Amount == 32) Amount = 0;
6668    TmpInst.setOpcode(newOpc);
6669    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6670    if (isNarrow)
6671      TmpInst.addOperand(MCOperand::CreateReg(
6672          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6673    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6674    if (newOpc != ARM::t2RRX)
6675      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6676    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6677    TmpInst.addOperand(Inst.getOperand(4));
6678    if (!isNarrow)
6679      TmpInst.addOperand(MCOperand::CreateReg(
6680          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6681    Inst = TmpInst;
6682    return true;
6683  }
6684  // Handle the ARM mode MOV complex aliases.
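  // E.g., "asr r0, r1, r2" is equivalent to "mov r0, r1, asr r2" and is
  // rewritten below as a MOVsr instruction.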
6685  case ARM::ASRr:
6686  case ARM::LSRr:
6687  case ARM::LSLr:
6688  case ARM::RORr: {
6689    ARM_AM::ShiftOpc ShiftTy;
6690    switch(Inst.getOpcode()) {
6691    default: llvm_unreachable("unexpected opcode!");
6692    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6693    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6694    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6695    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6696    }
6697    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6698    MCInst TmpInst;
6699    TmpInst.setOpcode(ARM::MOVsr);
6700    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6701    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6702    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6703    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6704    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6705    TmpInst.addOperand(Inst.getOperand(4));
6706    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6707    Inst = TmpInst;
6708    return true;
6709  }
6710  case ARM::ASRi:
6711  case ARM::LSRi:
6712  case ARM::LSLi:
6713  case ARM::RORi: {
6714    ARM_AM::ShiftOpc ShiftTy;
6715    switch(Inst.getOpcode()) {
6716    default: llvm_unreachable("unexpected opcode!");
6717    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6718    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6719    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6720    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6721    }
6722    // A shift by zero is a plain MOVr, not a MOVsi.
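    // E.g., "lsr r0, r1, #4" becomes "mov r0, r1, lsr #4" (MOVsi), while
    // "lsl r0, r1, #0" is just "mov r0, r1" (MOVr).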
6723    unsigned Amt = Inst.getOperand(2).getImm();
6724    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6725    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6726    MCInst TmpInst;
6727    TmpInst.setOpcode(Opc);
6728    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6729    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6730    if (Opc == ARM::MOVsi)
6731      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6732    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6733    TmpInst.addOperand(Inst.getOperand(4));
6734    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6735    Inst = TmpInst;
6736    return true;
6737  }
6738  case ARM::RRXi: {
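    // "rrx Rd, Rm" is equivalent to "mov Rd, Rm, rrx"; build the MOVsi form
    // with an rrx shifter operand.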
6739    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6740    MCInst TmpInst;
6741    TmpInst.setOpcode(ARM::MOVsi);
6742    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6743    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6744    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6745    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6746    TmpInst.addOperand(Inst.getOperand(3));
6747    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6748    Inst = TmpInst;
6749    return true;
6750  }
6751  case ARM::t2LDMIA_UPD: {
6752    // If this is a load of a single register, then we should use
6753    // a post-indexed LDR instruction instead, per the ARM ARM.
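    // E.g., a single-register "ldmia r3!, {r4}" is turned into the
    // post-indexed "ldr r4, [r3], #4".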
6754    if (Inst.getNumOperands() != 5)
6755      return false;
6756    MCInst TmpInst;
6757    TmpInst.setOpcode(ARM::t2LDR_POST);
6758    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6759    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6760    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6761    TmpInst.addOperand(MCOperand::CreateImm(4));
6762    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6763    TmpInst.addOperand(Inst.getOperand(3));
6764    Inst = TmpInst;
6765    return true;
6766  }
6767  case ARM::t2STMDB_UPD: {
6768    // If this is a store of a single register, then we should use
6769    // a pre-indexed STR instruction instead, per the ARM ARM.
6770    if (Inst.getNumOperands() != 5)
6771      return false;
6772    MCInst TmpInst;
6773    TmpInst.setOpcode(ARM::t2STR_PRE);
6774    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6775    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6776    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6777    TmpInst.addOperand(MCOperand::CreateImm(-4));
6778    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6779    TmpInst.addOperand(Inst.getOperand(3));
6780    Inst = TmpInst;
6781    return true;
6782  }
6783  case ARM::LDMIA_UPD:
6784    // If this is a load of a single register via a 'pop', then we should use
6785    // a post-indexed LDR instruction instead, per the ARM ARM.
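    // E.g., "pop {r4}" is turned into the post-indexed "ldr r4, [sp], #4".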
6786    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6787        Inst.getNumOperands() == 5) {
6788      MCInst TmpInst;
6789      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6790      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6791      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6792      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6793      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6794      TmpInst.addOperand(MCOperand::CreateImm(4));
6795      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6796      TmpInst.addOperand(Inst.getOperand(3));
6797      Inst = TmpInst;
6798      return true;
6799    }
6800    break;
6801  case ARM::STMDB_UPD:
6802    // If this is a store of a single register via a 'push', then we should use
6803    // a pre-indexed STR instruction instead, per the ARM ARM.
6804    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6805        Inst.getNumOperands() == 5) {
6806      MCInst TmpInst;
6807      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6808      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6809      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6810      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6811      TmpInst.addOperand(MCOperand::CreateImm(-4));
6812      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6813      TmpInst.addOperand(Inst.getOperand(3));
6814      Inst = TmpInst;
6815    }
6816    break;
6817  case ARM::t2ADDri12:
6818    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6819    // mnemonic was used (not "addw"), encoding T3 is preferred.
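    // E.g., "add r0, r1, #16" can use the T3 modified-immediate encoding,
    // while "addw r0, r1, #4095" keeps the 12-bit immediate encoding.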
6820    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6821        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6822      break;
6823    Inst.setOpcode(ARM::t2ADDri);
6824    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6825    break;
6826  case ARM::t2SUBri12:
6827    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6828    // mnemonic was used (not "subw"), encoding T3 is preferred.
6829    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6830        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6831      break;
6832    Inst.setOpcode(ARM::t2SUBri);
6833    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6834    break;
6835  case ARM::tADDi8:
6836    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6837    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6838    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6839    // to encoding T1 if <Rd> is omitted."
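    // E.g., "adds r1, r1, #3" (Rd written explicitly) selects tADDi3, while
    // "adds r1, #3" keeps the tADDi8 form.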
6840    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6841      Inst.setOpcode(ARM::tADDi3);
6842      return true;
6843    }
6844    break;
6845  case ARM::tSUBi8:
6846    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6847    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6848    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6849    // to encoding T1 if <Rd> is omitted."
6850    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6851      Inst.setOpcode(ARM::tSUBi3);
6852      return true;
6853    }
6854    break;
6855  case ARM::t2ADDri:
6856  case ARM::t2SUBri: {
6857    // If the destination and first source operand are the same, and
6858    // the flags are compatible with the current IT status, use encoding T2
6859    // instead of T3. For compatibility with the system 'as'. Make sure the
6860    // wide encoding wasn't explicit.
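    // E.g., "adds r1, r1, #200" outside an IT block (or "add r1, r1, #200"
    // inside one) narrows to tADDi8; an explicit "adds.w" stays wide.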
6861    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6862        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
6863        (unsigned)Inst.getOperand(2).getImm() > 255 ||
6864        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
6865        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
6866        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6867         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6868      break;
6869    MCInst TmpInst;
6870    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
6871                      ARM::tADDi8 : ARM::tSUBi8);
6872    TmpInst.addOperand(Inst.getOperand(0));
6873    TmpInst.addOperand(Inst.getOperand(5));
6874    TmpInst.addOperand(Inst.getOperand(0));
6875    TmpInst.addOperand(Inst.getOperand(2));
6876    TmpInst.addOperand(Inst.getOperand(3));
6877    TmpInst.addOperand(Inst.getOperand(4));
6878    Inst = TmpInst;
6879    return true;
6880  }
6881  case ARM::t2ADDrr: {
6882    // If the destination and first source operand are the same, and
6883    // there's no setting of the flags, use encoding T2 instead of T3.
6884    // Note that this is only for ADD, not SUB. This mirrors the system
6885    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
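    // E.g., "add r3, r3, r12" narrows to the 16-bit tADDhirr encoding.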
6886    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6887        Inst.getOperand(5).getReg() != 0 ||
6888        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6889         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6890      break;
6891    MCInst TmpInst;
6892    TmpInst.setOpcode(ARM::tADDhirr);
6893    TmpInst.addOperand(Inst.getOperand(0));
6894    TmpInst.addOperand(Inst.getOperand(0));
6895    TmpInst.addOperand(Inst.getOperand(2));
6896    TmpInst.addOperand(Inst.getOperand(3));
6897    TmpInst.addOperand(Inst.getOperand(4));
6898    Inst = TmpInst;
6899    return true;
6900  }
6901  case ARM::tB:
6902    // A Thumb conditional branch outside of an IT block is a tBcc.
6903    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6904      Inst.setOpcode(ARM::tBcc);
6905      return true;
6906    }
6907    break;
6908  case ARM::t2B:
6909    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6910    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6911      Inst.setOpcode(ARM::t2Bcc);
6912      return true;
6913    }
6914    break;
6915  case ARM::t2Bcc:
6916    // If the conditional is AL or we're in an IT block, we really want t2B.
6917    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6918      Inst.setOpcode(ARM::t2B);
6919      return true;
6920    }
6921    break;
6922  case ARM::tBcc:
6923    // If the conditional is AL, we really want tB.
6924    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6925      Inst.setOpcode(ARM::tB);
6926      return true;
6927    }
6928    break;
6929  case ARM::tLDMIA: {
6930    // If the register list contains any high registers, or if the writeback
6931    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6932    // instead if we're in Thumb2. Otherwise, this should have generated
6933    // an error in validateInstruction().
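    // E.g., "ldmia r0, {r1, r8}" (high register) or "ldmia r0, {r1, r2}"
    // (no writeback and base not in the list) must use the 32-bit form.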
6934    unsigned Rn = Inst.getOperand(0).getReg();
6935    bool hasWritebackToken =
6936      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6937       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6938    bool listContainsBase;
6939    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6940        (!listContainsBase && !hasWritebackToken) ||
6941        (listContainsBase && hasWritebackToken)) {
6942      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6943      assert (isThumbTwo());
6944      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6945      // If we're switching to the updating version, we need to insert
6946      // the writeback tied operand.
6947      if (hasWritebackToken)
6948        Inst.insert(Inst.begin(),
6949                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6950      return true;
6951    }
6952    break;
6953  }
6954  case ARM::tSTMIA_UPD: {
6955    // If the register list contains any high registers, we need to use
6956    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6957    // should have generated an error in validateInstruction().
6958    unsigned Rn = Inst.getOperand(0).getReg();
6959    bool listContainsBase;
6960    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6961      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6962      assert (isThumbTwo());
6963      Inst.setOpcode(ARM::t2STMIA_UPD);
6964      return true;
6965    }
6966    break;
6967  }
6968  case ARM::tPOP: {
6969    bool listContainsBase;
6970    // If the register list contains any high registers, we need to use
6971    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6972    // should have generated an error in validateInstruction().
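    // E.g., "pop {r4, r8}" needs the equivalent "ldmia sp!, {r4, r8}"
    // (t2LDMIA_UPD) since r8 has no 16-bit POP encoding.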
6973    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6974      return false;
6975    assert (isThumbTwo());
6976    Inst.setOpcode(ARM::t2LDMIA_UPD);
6977    // Add the base register and writeback operands.
6978    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6979    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6980    return true;
6981  }
6982  case ARM::tPUSH: {
6983    bool listContainsBase;
6984    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6985      return false;
6986    assert (isThumbTwo());
6987    Inst.setOpcode(ARM::t2STMDB_UPD);
6988    // Add the base register and writeback operands.
6989    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6990    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6991    return true;
6992  }
6993  case ARM::t2MOVi: {
6994    // If we can use the 16-bit encoding and the user didn't explicitly
6995    // request the 32-bit variant, transform it here.
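    // E.g., outside an IT block, "movs r0, #42" narrows to tMOVi8, while an
    // explicit "movs.w r0, #42" keeps the 32-bit encoding.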
6996    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6997        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
6998        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6999         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7000        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7001        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7002         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7003      // The operands aren't in the same order for tMOVi8...
7004      MCInst TmpInst;
7005      TmpInst.setOpcode(ARM::tMOVi8);
7006      TmpInst.addOperand(Inst.getOperand(0));
7007      TmpInst.addOperand(Inst.getOperand(4));
7008      TmpInst.addOperand(Inst.getOperand(1));
7009      TmpInst.addOperand(Inst.getOperand(2));
7010      TmpInst.addOperand(Inst.getOperand(3));
7011      Inst = TmpInst;
7012      return true;
7013    }
7014    break;
7015  }
7016  case ARM::t2MOVr: {
7017    // If we can use the 16-bit encoding and the user didn't explicitly
7018    // request the 32-bit variant, transform it here.
7019    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7020        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7021        Inst.getOperand(2).getImm() == ARMCC::AL &&
7022        Inst.getOperand(4).getReg() == ARM::CPSR &&
7023        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7024         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7025      // The operands aren't the same for tMOV[S]r... (no cc_out)
7026      MCInst TmpInst;
7027      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7028      TmpInst.addOperand(Inst.getOperand(0));
7029      TmpInst.addOperand(Inst.getOperand(1));
7030      TmpInst.addOperand(Inst.getOperand(2));
7031      TmpInst.addOperand(Inst.getOperand(3));
7032      Inst = TmpInst;
7033      return true;
7034    }
7035    break;
7036  }
7037  case ARM::t2SXTH:
7038  case ARM::t2SXTB:
7039  case ARM::t2UXTH:
7040  case ARM::t2UXTB: {
7041    // If we can use the 16-bit encoding and the user didn't explicitly
7042    // request the 32-bit variant, transform it here.
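    // E.g., "uxtb r0, r1" narrows to tUXTB, while "uxtb r0, r1, ror #8"
    // must keep the 32-bit encoding because of the rotation.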
7043    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7044        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7045        Inst.getOperand(2).getImm() == 0 &&
7046        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7047         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7048      unsigned NewOpc;
7049      switch (Inst.getOpcode()) {
7050      default: llvm_unreachable("Illegal opcode!");
7051      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7052      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7053      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7054      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7055      }
7056      // The operands aren't the same for Thumb1 (no rotate operand).
7057      MCInst TmpInst;
7058      TmpInst.setOpcode(NewOpc);
7059      TmpInst.addOperand(Inst.getOperand(0));
7060      TmpInst.addOperand(Inst.getOperand(1));
7061      TmpInst.addOperand(Inst.getOperand(3));
7062      TmpInst.addOperand(Inst.getOperand(4));
7063      Inst = TmpInst;
7064      return true;
7065    }
7066    break;
7067  }
7068  case ARM::MOVsi: {
7069    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7070    if (SOpc == ARM_AM::rrx) return false;
7071    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7072      // Shifting by zero is accepted as a vanilla 'MOVr'
7073      MCInst TmpInst;
7074      TmpInst.setOpcode(ARM::MOVr);
7075      TmpInst.addOperand(Inst.getOperand(0));
7076      TmpInst.addOperand(Inst.getOperand(1));
7077      TmpInst.addOperand(Inst.getOperand(3));
7078      TmpInst.addOperand(Inst.getOperand(4));
7079      TmpInst.addOperand(Inst.getOperand(5));
7080      Inst = TmpInst;
7081      return true;
7082    }
7083    return false;
7084  }
7085  case ARM::ANDrsi:
7086  case ARM::ORRrsi:
7087  case ARM::EORrsi:
7088  case ARM::BICrsi:
7089  case ARM::SUBrsi:
7090  case ARM::ADDrsi: {
7091    unsigned newOpc;
7092    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7093    if (SOpc == ARM_AM::rrx) return false;
7094    switch (Inst.getOpcode()) {
7095    default: llvm_unreachable("unexpected opcode!");
7096    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7097    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7098    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7099    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7100    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7101    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7102    }
7103    // If the shift is by zero, use the non-shifted instruction definition.
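    // E.g., "and r0, r1, r2, lsl #0" is the same operation as
    // "and r0, r1, r2", so use ANDrr.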
7104    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7105      MCInst TmpInst;
7106      TmpInst.setOpcode(newOpc);
7107      TmpInst.addOperand(Inst.getOperand(0));
7108      TmpInst.addOperand(Inst.getOperand(1));
7109      TmpInst.addOperand(Inst.getOperand(2));
7110      TmpInst.addOperand(Inst.getOperand(4));
7111      TmpInst.addOperand(Inst.getOperand(5));
7112      TmpInst.addOperand(Inst.getOperand(6));
7113      Inst = TmpInst;
7114      return true;
7115    }
7116    return false;
7117  }
7118  case ARM::ITasm:
7119  case ARM::t2IT: {
7120    // The mask bits for all but the first condition are represented so
7121    // that the low bit of the condition code value implies 't'. The
7122    // parser always uses 1 to imply 't', so XOR-toggle the bits if the
7123    // low bit of the condition code is zero. The encoding also expects
7124    // the low bit of the condition to be encoded as bit 4 of the mask
7125    // operand, so mask that in if needed.
7126    MCOperand &MO = Inst.getOperand(1);
7127    unsigned Mask = MO.getImm();
7128    unsigned OrigMask = Mask;
7129    unsigned TZ = CountTrailingZeros_32(Mask);
7130    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7131      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7132      for (unsigned i = 3; i != TZ; --i)
7133        Mask ^= 1 << i;
7134    } else
7135      Mask |= 0x10;
7136    MO.setImm(Mask);
7137
7138    // Set up the IT block state according to the IT instruction we just
7139    // matched.
7140    assert(!inITBlock() && "nested IT blocks?!");
7141    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7142    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7143    ITState.CurPosition = 0;
7144    ITState.FirstCond = true;
7145    break;
7146  }
7147  }
7148  return false;
7149}
7150
7151unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7152  // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
7153  // suffix depending on whether they're in an IT block.
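  // E.g., the 16-bit encoding of "adds r2, r2, #1" is only valid outside an
  // IT block, and the 16-bit non-flag-setting form only inside one.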
7154  unsigned Opc = Inst.getOpcode();
7155  const MCInstrDesc &MCID = getInstDesc(Opc);
7156  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7157    assert(MCID.hasOptionalDef() &&
7158           "optionally flag setting instruction missing optional def operand");
7159    assert(MCID.NumOperands == Inst.getNumOperands() &&
7160           "operand count mismatch!");
7161    // Find the optional-def operand (cc_out).
7162    unsigned OpNo;
7163    for (OpNo = 0;
7164         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7165         ++OpNo)
7166      ;
7167    // If we're parsing Thumb1, reject it completely.
7168    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7169      return Match_MnemonicFail;
7170    // If we're parsing Thumb2, which form is legal depends on whether we're
7171    // in an IT block.
7172    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7173        !inITBlock())
7174      return Match_RequiresITBlock;
7175    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7176        inITBlock())
7177      return Match_RequiresNotITBlock;
7178  }
7179  // Some high-register supporting Thumb1 encodings only allow both registers
7180  // to be from r0-r7 when in Thumb2.
7181  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7182           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7183           isARMLowRegister(Inst.getOperand(2).getReg()))
7184    return Match_RequiresThumb2;
7185  // Others only require ARMv6 or later.
7186  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7187           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7188           isARMLowRegister(Inst.getOperand(1).getReg()))
7189    return Match_RequiresV6;
7190  return Match_Success;
7191}
7192
7193bool ARMAsmParser::
7194MatchAndEmitInstruction(SMLoc IDLoc,
7195                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7196                        MCStreamer &Out) {
7197  MCInst Inst;
7198  unsigned ErrorInfo;
7199  unsigned MatchResult;
7200  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7201  switch (MatchResult) {
7202  default: break;
7203  case Match_Success:
7204    // Context-sensitive operand constraints aren't handled by the matcher,
7205    // so check them here.
7206    if (validateInstruction(Inst, Operands)) {
7207      // Still progress the IT block, otherwise one wrong condition causes
7208      // nasty cascading errors.
7209      forwardITPosition();
7210      return true;
7211    }
7212
7213    // Some instructions need post-processing to, for example, tweak which
7214    // encoding is selected. Loop on it while changes happen so the
7215    // individual transformations can chain off each other. E.g.,
7216    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
7217    while (processInstruction(Inst, Operands))
7218      ;
7219
7220    // Only move forward at the very end so that everything in validate
7221    // and process gets a consistent answer about whether we're in an IT
7222    // block.
7223    forwardITPosition();
7224
7225    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7226    // doesn't actually encode.
7227    if (Inst.getOpcode() == ARM::ITasm)
7228      return false;
7229
7230    Inst.setLoc(IDLoc);
7231    Out.EmitInstruction(Inst);
7232    return false;
7233  case Match_MissingFeature:
7234    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7235    return true;
7236  case Match_InvalidOperand: {
7237    SMLoc ErrorLoc = IDLoc;
7238    if (ErrorInfo != ~0U) {
7239      if (ErrorInfo >= Operands.size())
7240        return Error(IDLoc, "too few operands for instruction");
7241
7242      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7243      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7244    }
7245
7246    return Error(ErrorLoc, "invalid operand for instruction");
7247  }
7248  case Match_MnemonicFail:
7249    return Error(IDLoc, "invalid instruction");
7250  case Match_ConversionFail:
7251    // The converter function will have already emitted a diagnostic.
7252    return true;
7253  case Match_RequiresNotITBlock:
7254    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7255  case Match_RequiresITBlock:
7256    return Error(IDLoc, "instruction only valid inside IT block");
7257  case Match_RequiresV6:
7258    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7259  case Match_RequiresThumb2:
7260    return Error(IDLoc, "instruction variant requires Thumb2");
7261  }
7262
7263  llvm_unreachable("Implement any new match types added!");
7264}
7265
7266/// parseDirective parses the ARM-specific directives.
7267bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7268  StringRef IDVal = DirectiveID.getIdentifier();
7269  if (IDVal == ".word")
7270    return parseDirectiveWord(4, DirectiveID.getLoc());
7271  else if (IDVal == ".thumb")
7272    return parseDirectiveThumb(DirectiveID.getLoc());
7273  else if (IDVal == ".arm")
7274    return parseDirectiveARM(DirectiveID.getLoc());
7275  else if (IDVal == ".thumb_func")
7276    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7277  else if (IDVal == ".code")
7278    return parseDirectiveCode(DirectiveID.getLoc());
7279  else if (IDVal == ".syntax")
7280    return parseDirectiveSyntax(DirectiveID.getLoc());
7281  else if (IDVal == ".unreq")
7282    return parseDirectiveUnreq(DirectiveID.getLoc());
7283  else if (IDVal == ".arch")
7284    return parseDirectiveArch(DirectiveID.getLoc());
7285  else if (IDVal == ".eabi_attribute")
7286    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7287  return true;
7288}
7289
7290/// parseDirectiveWord
7291///  ::= .word [ expression (, expression)* ]
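///  E.g., ".word 0x11223344, foo+4" emits two 4-byte values.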
7292bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7293  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7294    for (;;) {
7295      const MCExpr *Value;
7296      if (getParser().ParseExpression(Value))
7297        return true;
7298
7299      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7300
7301      if (getLexer().is(AsmToken::EndOfStatement))
7302        break;
7303
7304      // FIXME: Improve diagnostic.
7305      if (getLexer().isNot(AsmToken::Comma))
7306        return Error(L, "unexpected token in directive");
7307      Parser.Lex();
7308    }
7309  }
7310
7311  Parser.Lex();
7312  return false;
7313}
7314
7315/// parseDirectiveThumb
7316///  ::= .thumb
7317bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7318  if (getLexer().isNot(AsmToken::EndOfStatement))
7319    return Error(L, "unexpected token in directive");
7320  Parser.Lex();
7321
7322  if (!isThumb())
7323    SwitchMode();
7324  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7325  return false;
7326}
7327
7328/// parseDirectiveARM
7329///  ::= .arm
7330bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7331  if (getLexer().isNot(AsmToken::EndOfStatement))
7332    return Error(L, "unexpected token in directive");
7333  Parser.Lex();
7334
7335  if (isThumb())
7336    SwitchMode();
7337  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7338  return false;
7339}
7340
7341/// parseDirectiveThumbFunc
7342///  ::= .thumb_func symbol_name
7343bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7344  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7345  bool isMachO = MAI.hasSubsectionsViaSymbols();
7346  StringRef Name;
7347  bool needFuncName = true;
7348
7349  // Darwin asm has an optional function name after the .thumb_func
7350  // directive; ELF doesn't.
7351  if (isMachO) {
7352    const AsmToken &Tok = Parser.getTok();
7353    if (Tok.isNot(AsmToken::EndOfStatement)) {
7354      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7355        return Error(L, "unexpected token in .thumb_func directive");
7356      Name = Tok.getIdentifier();
7357      Parser.Lex(); // Consume the identifier token.
7358      needFuncName = false;
7359    }
7360  }
7361
7362  if (getLexer().isNot(AsmToken::EndOfStatement))
7363    return Error(L, "unexpected token in directive");
7364
7365  // Eat the end of statement and any blank lines that follow.
7366  while (getLexer().is(AsmToken::EndOfStatement))
7367    Parser.Lex();
7368
7369  // FIXME: assuming function name will be the line following .thumb_func
7370  // We really should be checking the next symbol definition even if there's
7371  // stuff in between.
7372  if (needFuncName) {
7373    Name = Parser.getTok().getIdentifier();
7374  }
7375
7376  // Mark symbol as a thumb symbol.
7377  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7378  getParser().getStreamer().EmitThumbFunc(Func);
7379  return false;
7380}
7381
7382/// parseDirectiveSyntax
7383///  ::= .syntax unified | divided
7384bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7385  const AsmToken &Tok = Parser.getTok();
7386  if (Tok.isNot(AsmToken::Identifier))
7387    return Error(L, "unexpected token in .syntax directive");
7388  StringRef Mode = Tok.getString();
7389  if (Mode == "unified" || Mode == "UNIFIED")
7390    Parser.Lex();
7391  else if (Mode == "divided" || Mode == "DIVIDED")
7392    return Error(L, "'.syntax divided' arm asssembly not supported");
7393  else
7394    return Error(L, "unrecognized syntax mode in .syntax directive");
7395
7396  if (getLexer().isNot(AsmToken::EndOfStatement))
7397    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7398  Parser.Lex();
7399
7400  // TODO tell the MC streamer the mode
7401  // getParser().getStreamer().Emit???();
7402  return false;
7403}
7404
7405/// parseDirectiveCode
7406///  ::= .code 16 | 32
7407bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7408  const AsmToken &Tok = Parser.getTok();
7409  if (Tok.isNot(AsmToken::Integer))
7410    return Error(L, "unexpected token in .code directive");
7411  int64_t Val = Parser.getTok().getIntVal();
7412  if (Val == 16)
7413    Parser.Lex();
7414  else if (Val == 32)
7415    Parser.Lex();
7416  else
7417    return Error(L, "invalid operand to .code directive");
7418
7419  if (getLexer().isNot(AsmToken::EndOfStatement))
7420    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7421  Parser.Lex();
7422
7423  if (Val == 16) {
7424    if (!isThumb())
7425      SwitchMode();
7426    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7427  } else {
7428    if (isThumb())
7429      SwitchMode();
7430    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7431  }
7432
7433  return false;
7434}
7435
7436/// parseDirectiveReq
7437///  ::= name .req registername
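///  E.g., "fp .req r11" lets "fp" be used wherever r11 is expected; a later
///  ".req" for the same name must map to the same register.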
7438bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7439  Parser.Lex(); // Eat the '.req' token.
7440  unsigned Reg;
7441  SMLoc SRegLoc, ERegLoc;
7442  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7443    Parser.EatToEndOfStatement();
7444    return Error(SRegLoc, "register name expected");
7445  }
7446
7447  // Shouldn't be anything else.
7448  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7449    Parser.EatToEndOfStatement();
7450    return Error(Parser.getTok().getLoc(),
7451                 "unexpected input in .req directive.");
7452  }
7453
7454  Parser.Lex(); // Consume the EndOfStatement
7455
7456  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7457    return Error(SRegLoc, "redefinition of '" + Name +
7458                          "' does not match original.");
7459
7460  return false;
7461}
7462
7463/// parseDirectiveUneq
7464///  ::= .unreq registername
7465bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7466  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7467    Parser.EatToEndOfStatement();
7468    return Error(L, "unexpected input in .unreq directive.");
7469  }
7470  RegisterReqs.erase(Parser.getTok().getIdentifier());
7471  Parser.Lex(); // Eat the identifier.
7472  return false;
7473}
7474
7475/// parseDirectiveArch
7476///  ::= .arch token
7477bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7478  return true;
7479}
7480
7481/// parseDirectiveEabiAttr
7482///  ::= .eabi_attribute int, int
7483bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7484  return true;
7485}
7486
7487extern "C" void LLVMInitializeARMAsmLexer();
7488
7489/// Force static initialization.
7490extern "C" void LLVMInitializeARMAsmParser() {
7491  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7492  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7493  LLVMInitializeARMAsmLexer();
7494}
7495
7496#define GET_REGISTER_MATCHER
7497#define GET_MATCHER_IMPLEMENTATION
7498#include "ARMGenAsmMatcher.inc"
7499