ARMAsmParser.cpp revision ceee984302a1cf1d659a186cf94149c779866da5
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases registered via the .req directive.
50  StringMap<unsigned> RegisterReqs;
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // first instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U; }
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
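  // Worked example (illustrative): per the ITState.Mask comments above, a
  // mask of 0b1000 has three trailing zeroes and so describes a block of
  // 4 - 3 == 1 conditional instruction, while a mask with bit 0 set
  // describes a full block of 4 - 0 == 4; forwardITPosition() marks the
  // block done once CurPosition reaches 5 - trailingzeroes(mask).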
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
86  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
87
88  int tryParseRegister();
89  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
90  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
93  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
94  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
95  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
96                              unsigned &ShiftAmount);
97  bool parseDirectiveWord(unsigned Size, SMLoc L);
98  bool parseDirectiveThumb(SMLoc L);
99  bool parseDirectiveARM(SMLoc L);
100  bool parseDirectiveThumbFunc(SMLoc L);
101  bool parseDirectiveCode(SMLoc L);
102  bool parseDirectiveSyntax(SMLoc L);
103  bool parseDirectiveReq(StringRef Name, SMLoc L);
104  bool parseDirectiveUnreq(SMLoc L);
105  bool parseDirectiveArch(SMLoc L);
106  bool parseDirectiveEabiAttr(SMLoc L);
107
108  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
109                          bool &CarrySetting, unsigned &ProcessorIMod,
110                          StringRef &ITMask);
111  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
112                             bool &CanAcceptPredicationCode);
113
114  bool isThumb() const {
115    // FIXME: Can tablegen auto-generate this?
116    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
117  }
118  bool isThumbOne() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
120  }
121  bool isThumbTwo() const {
122    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
123  }
124  bool hasV6Ops() const {
125    return STI.getFeatureBits() & ARM::HasV6Ops;
126  }
127  bool hasV7Ops() const {
128    return STI.getFeatureBits() & ARM::HasV7Ops;
129  }
130  void SwitchMode() {
131    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
132    setAvailableFeatures(FB);
133  }
134  bool isMClass() const {
135    return STI.getFeatureBits() & ARM::FeatureMClass;
136  }
137
138  /// @name Auto-generated Match Functions
139  /// {
140
141#define GET_ASSEMBLER_HEADER
142#include "ARMGenAsmMatcher.inc"
143
144  /// }
145
146  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
147  OperandMatchResultTy parseCoprocNumOperand(
148    SmallVectorImpl<MCParsedAsmOperand*>&);
149  OperandMatchResultTy parseCoprocRegOperand(
150    SmallVectorImpl<MCParsedAsmOperand*>&);
151  OperandMatchResultTy parseCoprocOptionOperand(
152    SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseMemBarrierOptOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseProcIFlagsOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseMSRMaskOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
160                                   StringRef Op, int Low, int High);
161  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "lsl", 0, 31);
163  }
164  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
165    return parsePKHImm(O, "asr", 1, 32);
166  }
167  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
176
177  // Asm Match Converter Methods
178  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
179                    const SmallVectorImpl<MCParsedAsmOperand*> &);
180  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
181                    const SmallVectorImpl<MCParsedAsmOperand*> &);
182  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
183                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
184  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
185                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
187                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
189                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
197                             const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
199                             const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
201                             const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
203                             const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
205                  const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
207                  const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
209                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
211                        const SmallVectorImpl<MCParsedAsmOperand*> &);
212  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
213                     const SmallVectorImpl<MCParsedAsmOperand*> &);
214  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
215                        const SmallVectorImpl<MCParsedAsmOperand*> &);
216  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
217                     const SmallVectorImpl<MCParsedAsmOperand*> &);
218  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
219                        const SmallVectorImpl<MCParsedAsmOperand*> &);
220
221  bool validateInstruction(MCInst &Inst,
222                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
223  bool processInstruction(MCInst &Inst,
224                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
225  bool shouldOmitCCOutOperand(StringRef Mnemonic,
226                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
227
228public:
229  enum ARMMatchResultTy {
230    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
231    Match_RequiresNotITBlock,
232    Match_RequiresV6,
233    Match_RequiresThumb2
234  };
235
236  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
237    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
238    MCAsmParserExtension::Initialize(_Parser);
239
240    // Cache the MCRegisterInfo.
241    MRI = &getContext().getRegisterInfo();
242
243    // Initialize the set of available features.
244    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
245
246    // Not in an IT block to start with.
247    ITState.CurPosition = ~0U;
248  }
249
250  // Implementation of the MCTargetAsmParser interface:
251  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
252  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
253                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
254  bool ParseDirective(AsmToken DirectiveID);
255
256  unsigned checkTargetMatchPredicate(MCInst &Inst);
257
258  bool MatchAndEmitInstruction(SMLoc IDLoc,
259                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
260                               MCStreamer &Out);
261};
262} // end anonymous namespace
263
264namespace {
265
266/// ARMOperand - Instances of this class represent a parsed ARM machine
267/// instruction.
268class ARMOperand : public MCParsedAsmOperand {
269  enum KindTy {
270    k_CondCode,
271    k_CCOut,
272    k_ITCondMask,
273    k_CoprocNum,
274    k_CoprocReg,
275    k_CoprocOption,
276    k_Immediate,
277    k_MemBarrierOpt,
278    k_Memory,
279    k_PostIndexRegister,
280    k_MSRMask,
281    k_ProcIFlags,
282    k_VectorIndex,
283    k_Register,
284    k_RegisterList,
285    k_DPRRegisterList,
286    k_SPRRegisterList,
287    k_VectorList,
288    k_VectorListAllLanes,
289    k_VectorListIndexed,
290    k_ShiftedRegister,
291    k_ShiftedImmediate,
292    k_ShifterImmediate,
293    k_RotateImmediate,
294    k_BitfieldDescriptor,
295    k_Token
296  } Kind;
297
298  SMLoc StartLoc, EndLoc;
299  SmallVector<unsigned, 8> Registers;
300
301  union {
302    struct {
303      ARMCC::CondCodes Val;
304    } CC;
305
306    struct {
307      unsigned Val;
308    } Cop;
309
310    struct {
311      unsigned Val;
312    } CoprocOption;
313
314    struct {
315      unsigned Mask:4;
316    } ITMask;
317
318    struct {
319      ARM_MB::MemBOpt Val;
320    } MBOpt;
321
322    struct {
323      ARM_PROC::IFlags Val;
324    } IFlags;
325
326    struct {
327      unsigned Val;
328    } MMask;
329
330    struct {
331      const char *Data;
332      unsigned Length;
333    } Tok;
334
335    struct {
336      unsigned RegNum;
337    } Reg;
338
339    // A vector register list is a sequential list of 1 to 4 registers.
340    struct {
341      unsigned RegNum;
342      unsigned Count;
343      unsigned LaneIndex;
344      bool isDoubleSpaced;
345    } VectorList;
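    // Illustrative mapping (assumed syntax): a NEON list written as
    // "{d0, d1, d2}" would be recorded with RegNum = D0, Count = 3 and
    // isDoubleSpaced = false, while "{d0, d2, d4}" is the double-spaced
    // form. LaneIndex is only meaningful for the indexed-list kinds.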
346
347    struct {
348      unsigned Val;
349    } VectorIndex;
350
351    struct {
352      const MCExpr *Val;
353    } Imm;
354
355    /// Combined record for all forms of ARM address expressions.
356    struct {
357      unsigned BaseRegNum;
358      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
359      // was specified.
360      const MCConstantExpr *OffsetImm;  // Offset immediate value
361      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
362      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
363      unsigned ShiftImm;        // shift for OffsetReg.
364      unsigned Alignment;       // 0 = no alignment specified
365                                // n = alignment in bytes (2, 4, 8, 16, or 32)
366      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
367    } Memory;
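    // Illustrative mapping (assumed syntax): "[r0, r1, lsl #2]" would be
    // recorded with BaseRegNum = R0, OffsetRegNum = R1, ShiftType = lsl and
    // ShiftImm = 2; "[r0, #-8]" would instead carry OffsetImm = -8; and
    // "[r0, -r1]" would set isNegative for the subtracted offset register.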
368
369    struct {
370      unsigned RegNum;
371      bool isAdd;
372      ARM_AM::ShiftOpc ShiftTy;
373      unsigned ShiftImm;
374    } PostIdxReg;
375
376    struct {
377      bool isASR;
378      unsigned Imm;
379    } ShifterImm;
380    struct {
381      ARM_AM::ShiftOpc ShiftTy;
382      unsigned SrcReg;
383      unsigned ShiftReg;
384      unsigned ShiftImm;
385    } RegShiftedReg;
386    struct {
387      ARM_AM::ShiftOpc ShiftTy;
388      unsigned SrcReg;
389      unsigned ShiftImm;
390    } RegShiftedImm;
391    struct {
392      unsigned Imm;
393    } RotImm;
394    struct {
395      unsigned LSB;
396      unsigned Width;
397    } Bitfield;
398  };
399
400  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
401public:
402  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
403    Kind = o.Kind;
404    StartLoc = o.StartLoc;
405    EndLoc = o.EndLoc;
406    switch (Kind) {
407    case k_CondCode:
408      CC = o.CC;
409      break;
410    case k_ITCondMask:
411      ITMask = o.ITMask;
412      break;
413    case k_Token:
414      Tok = o.Tok;
415      break;
416    case k_CCOut:
417    case k_Register:
418      Reg = o.Reg;
419      break;
420    case k_RegisterList:
421    case k_DPRRegisterList:
422    case k_SPRRegisterList:
423      Registers = o.Registers;
424      break;
425    case k_VectorList:
426    case k_VectorListAllLanes:
427    case k_VectorListIndexed:
428      VectorList = o.VectorList;
429      break;
430    case k_CoprocNum:
431    case k_CoprocReg:
432      Cop = o.Cop;
433      break;
434    case k_CoprocOption:
435      CoprocOption = o.CoprocOption;
436      break;
437    case k_Immediate:
438      Imm = o.Imm;
439      break;
440    case k_MemBarrierOpt:
441      MBOpt = o.MBOpt;
442      break;
443    case k_Memory:
444      Memory = o.Memory;
445      break;
446    case k_PostIndexRegister:
447      PostIdxReg = o.PostIdxReg;
448      break;
449    case k_MSRMask:
450      MMask = o.MMask;
451      break;
452    case k_ProcIFlags:
453      IFlags = o.IFlags;
454      break;
455    case k_ShifterImmediate:
456      ShifterImm = o.ShifterImm;
457      break;
458    case k_ShiftedRegister:
459      RegShiftedReg = o.RegShiftedReg;
460      break;
461    case k_ShiftedImmediate:
462      RegShiftedImm = o.RegShiftedImm;
463      break;
464    case k_RotateImmediate:
465      RotImm = o.RotImm;
466      break;
467    case k_BitfieldDescriptor:
468      Bitfield = o.Bitfield;
469      break;
470    case k_VectorIndex:
471      VectorIndex = o.VectorIndex;
472      break;
473    }
474  }
475
476  /// getStartLoc - Get the location of the first token of this operand.
477  SMLoc getStartLoc() const { return StartLoc; }
478  /// getEndLoc - Get the location of the last token of this operand.
479  SMLoc getEndLoc() const { return EndLoc; }
480
481  ARMCC::CondCodes getCondCode() const {
482    assert(Kind == k_CondCode && "Invalid access!");
483    return CC.Val;
484  }
485
486  unsigned getCoproc() const {
487    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
488    return Cop.Val;
489  }
490
491  StringRef getToken() const {
492    assert(Kind == k_Token && "Invalid access!");
493    return StringRef(Tok.Data, Tok.Length);
494  }
495
496  unsigned getReg() const {
497    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
498    return Reg.RegNum;
499  }
500
501  const SmallVectorImpl<unsigned> &getRegList() const {
502    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
503            Kind == k_SPRRegisterList) && "Invalid access!");
504    return Registers;
505  }
506
507  const MCExpr *getImm() const {
508    assert(isImm() && "Invalid access!");
509    return Imm.Val;
510  }
511
512  unsigned getVectorIndex() const {
513    assert(Kind == k_VectorIndex && "Invalid access!");
514    return VectorIndex.Val;
515  }
516
517  ARM_MB::MemBOpt getMemBarrierOpt() const {
518    assert(Kind == k_MemBarrierOpt && "Invalid access!");
519    return MBOpt.Val;
520  }
521
522  ARM_PROC::IFlags getProcIFlags() const {
523    assert(Kind == k_ProcIFlags && "Invalid access!");
524    return IFlags.Val;
525  }
526
527  unsigned getMSRMask() const {
528    assert(Kind == k_MSRMask && "Invalid access!");
529    return MMask.Val;
530  }
531
532  bool isCoprocNum() const { return Kind == k_CoprocNum; }
533  bool isCoprocReg() const { return Kind == k_CoprocReg; }
534  bool isCoprocOption() const { return Kind == k_CoprocOption; }
535  bool isCondCode() const { return Kind == k_CondCode; }
536  bool isCCOut() const { return Kind == k_CCOut; }
537  bool isITMask() const { return Kind == k_ITCondMask; }
538  bool isITCondCode() const { return Kind == k_CondCode; }
539  bool isImm() const { return Kind == k_Immediate; }
540  bool isFPImm() const {
541    if (!isImm()) return false;
542    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
543    if (!CE) return false;
544    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
545    return Val != -1;
546  }
547  bool isFBits16() const {
548    if (!isImm()) return false;
549    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
550    if (!CE) return false;
551    int64_t Value = CE->getValue();
552    return Value >= 0 && Value <= 16;
553  }
554  bool isFBits32() const {
555    if (!isImm()) return false;
556    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
557    if (!CE) return false;
558    int64_t Value = CE->getValue();
559    return Value >= 1 && Value <= 32;
560  }
561  bool isImm8s4() const {
562    if (!isImm()) return false;
563    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
564    if (!CE) return false;
565    int64_t Value = CE->getValue();
566    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
567  }
568  bool isImm0_1020s4() const {
569    if (!isImm()) return false;
570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
571    if (!CE) return false;
572    int64_t Value = CE->getValue();
573    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
574  }
575  bool isImm0_508s4() const {
576    if (!isImm()) return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
581  }
582  bool isImm0_255() const {
583    if (!isImm()) return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = CE->getValue();
587    return Value >= 0 && Value < 256;
588  }
589  bool isImm0_1() const {
590    if (!isImm()) return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return Value >= 0 && Value < 2;
595  }
596  bool isImm0_3() const {
597    if (!isImm()) return false;
598    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
599    if (!CE) return false;
600    int64_t Value = CE->getValue();
601    return Value >= 0 && Value < 4;
602  }
603  bool isImm0_7() const {
604    if (!isImm()) return false;
605    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
606    if (!CE) return false;
607    int64_t Value = CE->getValue();
608    return Value >= 0 && Value < 8;
609  }
610  bool isImm0_15() const {
611    if (!isImm()) return false;
612    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
613    if (!CE) return false;
614    int64_t Value = CE->getValue();
615    return Value >= 0 && Value < 16;
616  }
617  bool isImm0_31() const {
618    if (!isImm()) return false;
619    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
620    if (!CE) return false;
621    int64_t Value = CE->getValue();
622    return Value >= 0 && Value < 32;
623  }
624  bool isImm0_63() const {
625    if (!isImm()) return false;
626    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
627    if (!CE) return false;
628    int64_t Value = CE->getValue();
629    return Value >= 0 && Value < 64;
630  }
631  bool isImm8() const {
632    if (!isImm()) return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 8;
637  }
638  bool isImm16() const {
639    if (!isImm()) return false;
640    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
641    if (!CE) return false;
642    int64_t Value = CE->getValue();
643    return Value == 16;
644  }
645  bool isImm32() const {
646    if (!isImm()) return false;
647    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
648    if (!CE) return false;
649    int64_t Value = CE->getValue();
650    return Value == 32;
651  }
652  bool isShrImm8() const {
653    if (!isImm()) return false;
654    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
655    if (!CE) return false;
656    int64_t Value = CE->getValue();
657    return Value > 0 && Value <= 8;
658  }
659  bool isShrImm16() const {
660    if (!isImm()) return false;
661    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
662    if (!CE) return false;
663    int64_t Value = CE->getValue();
664    return Value > 0 && Value <= 16;
665  }
666  bool isShrImm32() const {
667    if (!isImm()) return false;
668    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
669    if (!CE) return false;
670    int64_t Value = CE->getValue();
671    return Value > 0 && Value <= 32;
672  }
673  bool isShrImm64() const {
674    if (!isImm()) return false;
675    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
676    if (!CE) return false;
677    int64_t Value = CE->getValue();
678    return Value > 0 && Value <= 64;
679  }
680  bool isImm1_7() const {
681    if (!isImm()) return false;
682    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683    if (!CE) return false;
684    int64_t Value = CE->getValue();
685    return Value > 0 && Value < 8;
686  }
687  bool isImm1_15() const {
688    if (!isImm()) return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value > 0 && Value < 16;
693  }
694  bool isImm1_31() const {
695    if (!isImm()) return false;
696    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
697    if (!CE) return false;
698    int64_t Value = CE->getValue();
699    return Value > 0 && Value < 32;
700  }
701  bool isImm1_16() const {
702    if (!isImm()) return false;
703    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704    if (!CE) return false;
705    int64_t Value = CE->getValue();
706    return Value > 0 && Value < 17;
707  }
708  bool isImm1_32() const {
709    if (!isImm()) return false;
710    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
711    if (!CE) return false;
712    int64_t Value = CE->getValue();
713    return Value > 0 && Value < 33;
714  }
715  bool isImm0_32() const {
716    if (!isImm()) return false;
717    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
718    if (!CE) return false;
719    int64_t Value = CE->getValue();
720    return Value >= 0 && Value < 33;
721  }
722  bool isImm0_65535() const {
723    if (!isImm()) return false;
724    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
725    if (!CE) return false;
726    int64_t Value = CE->getValue();
727    return Value >= 0 && Value < 65536;
728  }
729  bool isImm0_65535Expr() const {
730    if (!isImm()) return false;
731    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
732    // If it's not a constant expression, it'll generate a fixup and be
733    // handled later.
734    if (!CE) return true;
735    int64_t Value = CE->getValue();
736    return Value >= 0 && Value < 65536;
737  }
738  bool isImm24bit() const {
739    if (!isImm()) return false;
740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
741    if (!CE) return false;
742    int64_t Value = CE->getValue();
743    return Value >= 0 && Value <= 0xffffff;
744  }
745  bool isImmThumbSR() const {
746    if (!isImm()) return false;
747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748    if (!CE) return false;
749    int64_t Value = CE->getValue();
750    return Value > 0 && Value < 33;
751  }
752  bool isPKHLSLImm() const {
753    if (!isImm()) return false;
754    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755    if (!CE) return false;
756    int64_t Value = CE->getValue();
757    return Value >= 0 && Value < 32;
758  }
759  bool isPKHASRImm() const {
760    if (!isImm()) return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    if (!CE) return false;
763    int64_t Value = CE->getValue();
764    return Value > 0 && Value <= 32;
765  }
766  bool isARMSOImm() const {
767    if (!isImm()) return false;
768    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
769    if (!CE) return false;
770    int64_t Value = CE->getValue();
771    return ARM_AM::getSOImmVal(Value) != -1;
772  }
773  bool isARMSOImmNot() const {
774    if (!isImm()) return false;
775    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
776    if (!CE) return false;
777    int64_t Value = CE->getValue();
778    return ARM_AM::getSOImmVal(~Value) != -1;
779  }
780  bool isARMSOImmNeg() const {
781    if (!isImm()) return false;
782    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783    if (!CE) return false;
784    int64_t Value = CE->getValue();
785    return ARM_AM::getSOImmVal(-Value) != -1;
786  }
787  bool isT2SOImm() const {
788    if (!isImm()) return false;
789    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
790    if (!CE) return false;
791    int64_t Value = CE->getValue();
792    return ARM_AM::getT2SOImmVal(Value) != -1;
793  }
794  bool isT2SOImmNot() const {
795    if (!isImm()) return false;
796    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
797    if (!CE) return false;
798    int64_t Value = CE->getValue();
799    return ARM_AM::getT2SOImmVal(~Value) != -1;
800  }
801  bool isT2SOImmNeg() const {
802    if (!isImm()) return false;
803    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
804    if (!CE) return false;
805    int64_t Value = CE->getValue();
806    return ARM_AM::getT2SOImmVal(-Value) != -1;
807  }
808  bool isSetEndImm() const {
809    if (!isImm()) return false;
810    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
811    if (!CE) return false;
812    int64_t Value = CE->getValue();
813    return Value == 1 || Value == 0;
814  }
815  bool isReg() const { return Kind == k_Register; }
816  bool isRegList() const { return Kind == k_RegisterList; }
817  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
818  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
819  bool isToken() const { return Kind == k_Token; }
820  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
821  bool isMemory() const { return Kind == k_Memory; }
822  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
823  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
824  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
825  bool isRotImm() const { return Kind == k_RotateImmediate; }
826  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
827  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
828  bool isPostIdxReg() const {
829    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
830  }
831  bool isMemNoOffset(bool alignOK = false) const {
832    if (!isMemory())
833      return false;
834    // No offset of any kind.
835    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
836     (alignOK || Memory.Alignment == 0);
837  }
838  bool isMemPCRelImm12() const {
839    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
840      return false;
841    // Base register must be PC.
842    if (Memory.BaseRegNum != ARM::PC)
843      return false;
844    // Immediate offset in range [-4095, 4095].
845    if (!Memory.OffsetImm) return true;
846    int64_t Val = Memory.OffsetImm->getValue();
847    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
848  }
849  bool isAlignedMemory() const {
850    return isMemNoOffset(true);
851  }
852  bool isAddrMode2() const {
853    if (!isMemory() || Memory.Alignment != 0) return false;
854    // Check for register offset.
855    if (Memory.OffsetRegNum) return true;
856    // Immediate offset in range [-4095, 4095].
857    if (!Memory.OffsetImm) return true;
858    int64_t Val = Memory.OffsetImm->getValue();
859    return Val > -4096 && Val < 4096;
860  }
861  bool isAM2OffsetImm() const {
862    if (!isImm()) return false;
863    // Immediate offset in range [-4095, 4095].
864    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
865    if (!CE) return false;
866    int64_t Val = CE->getValue();
867    return Val > -4096 && Val < 4096;
868  }
869  bool isAddrMode3() const {
870    // If we have an immediate that's not a constant, treat it as a label
871    // reference needing a fixup. If it is a constant, it's something else
872    // and we reject it.
873    if (isImm() && !isa<MCConstantExpr>(getImm()))
874      return true;
875    if (!isMemory() || Memory.Alignment != 0) return false;
876    // No shifts are legal for AM3.
877    if (Memory.ShiftType != ARM_AM::no_shift) return false;
878    // Check for register offset.
879    if (Memory.OffsetRegNum) return true;
880    // Immediate offset in range [-255, 255].
881    if (!Memory.OffsetImm) return true;
882    int64_t Val = Memory.OffsetImm->getValue();
883    return Val > -256 && Val < 256;
884  }
885  bool isAM3Offset() const {
886    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
887      return false;
888    if (Kind == k_PostIndexRegister)
889      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
890    // Immediate offset in range [-255, 255].
891    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
892    if (!CE) return false;
893    int64_t Val = CE->getValue();
894    // Special case, #-0 is INT32_MIN.
895    return (Val > -256 && Val < 256) || Val == INT32_MIN;
896  }
897  bool isAddrMode5() const {
898    // If we have an immediate that's not a constant, treat it as a label
899    // reference needing a fixup. If it is a constant, it's something else
900    // and we reject it.
901    if (isImm() && !isa<MCConstantExpr>(getImm()))
902      return true;
903    if (!isMemory() || Memory.Alignment != 0) return false;
904    // Check for register offset.
905    if (Memory.OffsetRegNum) return false;
906    // Immediate offset in range [-1020, 1020] and a multiple of 4.
907    if (!Memory.OffsetImm) return true;
908    int64_t Val = Memory.OffsetImm->getValue();
909    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
910      Val == INT32_MIN;
911  }
912  bool isMemTBB() const {
913    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
914        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
915      return false;
916    return true;
917  }
918  bool isMemTBH() const {
919    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
920        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
921        Memory.Alignment != 0 )
922      return false;
923    return true;
924  }
925  bool isMemRegOffset() const {
926    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
927      return false;
928    return true;
929  }
930  bool isT2MemRegOffset() const {
931    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
932        Memory.Alignment != 0)
933      return false;
934    // Only lsl #{0, 1, 2, 3} allowed.
935    if (Memory.ShiftType == ARM_AM::no_shift)
936      return true;
937    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
938      return false;
939    return true;
940  }
941  bool isMemThumbRR() const {
942    // Thumb reg+reg addressing is simple. Just two registers, a base and
943    // an offset. No shifts, negations or any other complicating factors.
944    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
945        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
946      return false;
947    return isARMLowRegister(Memory.BaseRegNum) &&
948      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
949  }
950  bool isMemThumbRIs4() const {
951    if (!isMemory() || Memory.OffsetRegNum != 0 ||
952        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
953      return false;
954    // Immediate offset, multiple of 4 in range [0, 124].
955    if (!Memory.OffsetImm) return true;
956    int64_t Val = Memory.OffsetImm->getValue();
957    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
958  }
959  bool isMemThumbRIs2() const {
960    if (!isMemory() || Memory.OffsetRegNum != 0 ||
961        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
962      return false;
963    // Immediate offset, multiple of 2 in range [0, 62].
964    if (!Memory.OffsetImm) return true;
965    int64_t Val = Memory.OffsetImm->getValue();
966    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
967  }
968  bool isMemThumbRIs1() const {
969    if (!isMemory() || Memory.OffsetRegNum != 0 ||
970        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
971      return false;
972    // Immediate offset in range [0, 31].
973    if (!Memory.OffsetImm) return true;
974    int64_t Val = Memory.OffsetImm->getValue();
975    return Val >= 0 && Val <= 31;
976  }
977  bool isMemThumbSPI() const {
978    if (!isMemory() || Memory.OffsetRegNum != 0 ||
979        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
980      return false;
981    // Immediate offset, multiple of 4 in range [0, 1020].
982    if (!Memory.OffsetImm) return true;
983    int64_t Val = Memory.OffsetImm->getValue();
984    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
985  }
986  bool isMemImm8s4Offset() const {
987    // If we have an immediate that's not a constant, treat it as a label
988    // reference needing a fixup. If it is a constant, it's something else
989    // and we reject it.
990    if (isImm() && !isa<MCConstantExpr>(getImm()))
991      return true;
992    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
993      return false;
994    // Immediate offset a multiple of 4 in range [-1020, 1020].
995    if (!Memory.OffsetImm) return true;
996    int64_t Val = Memory.OffsetImm->getValue();
997    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
998  }
999  bool isMemImm0_1020s4Offset() const {
1000    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1001      return false;
1002    // Immediate offset a multiple of 4 in range [0, 1020].
1003    if (!Memory.OffsetImm) return true;
1004    int64_t Val = Memory.OffsetImm->getValue();
1005    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1006  }
1007  bool isMemImm8Offset() const {
1008    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1009      return false;
1010    // Base reg of PC isn't allowed for these encodings.
1011    if (Memory.BaseRegNum == ARM::PC) return false;
1012    // Immediate offset in range [-255, 255].
1013    if (!Memory.OffsetImm) return true;
1014    int64_t Val = Memory.OffsetImm->getValue();
1015    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1016  }
1017  bool isMemPosImm8Offset() const {
1018    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1019      return false;
1020    // Immediate offset in range [0, 255].
1021    if (!Memory.OffsetImm) return true;
1022    int64_t Val = Memory.OffsetImm->getValue();
1023    return Val >= 0 && Val < 256;
1024  }
1025  bool isMemNegImm8Offset() const {
1026    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1027      return false;
1028    // Base reg of PC isn't allowed for these encodings.
1029    if (Memory.BaseRegNum == ARM::PC) return false;
1030    // Immediate offset in range [-255, -1].
1031    if (!Memory.OffsetImm) return false;
1032    int64_t Val = Memory.OffsetImm->getValue();
1033    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1034  }
1035  bool isMemUImm12Offset() const {
1036    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1037      return false;
1038    // Immediate offset in range [0, 4095].
1039    if (!Memory.OffsetImm) return true;
1040    int64_t Val = Memory.OffsetImm->getValue();
1041    return (Val >= 0 && Val < 4096);
1042  }
1043  bool isMemImm12Offset() const {
1044    // If we have an immediate that's not a constant, treat it as a label
1045    // reference needing a fixup. If it is a constant, it's something else
1046    // and we reject it.
1047    if (isImm() && !isa<MCConstantExpr>(getImm()))
1048      return true;
1049
1050    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1051      return false;
1052    // Immediate offset in range [-4095, 4095].
1053    if (!Memory.OffsetImm) return true;
1054    int64_t Val = Memory.OffsetImm->getValue();
1055    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1056  }
1057  bool isPostIdxImm8() const {
1058    if (!isImm()) return false;
1059    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1060    if (!CE) return false;
1061    int64_t Val = CE->getValue();
1062    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1063  }
1064  bool isPostIdxImm8s4() const {
1065    if (!isImm()) return false;
1066    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1067    if (!CE) return false;
1068    int64_t Val = CE->getValue();
1069    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1070      (Val == INT32_MIN);
1071  }
1072
1073  bool isMSRMask() const { return Kind == k_MSRMask; }
1074  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1075
1076  // NEON operands.
1077  bool isSingleSpacedVectorList() const {
1078    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1079  }
1080  bool isDoubleSpacedVectorList() const {
1081    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1082  }
1083  bool isVecListOneD() const {
1084    if (!isSingleSpacedVectorList()) return false;
1085    return VectorList.Count == 1;
1086  }
1087
1088  bool isVecListDPair() const {
1089    if (!isSingleSpacedVectorList()) return false;
1090    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1091              .contains(VectorList.RegNum));
1092  }
1093
1094  bool isVecListThreeD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 3;
1097  }
1098
1099  bool isVecListFourD() const {
1100    if (!isSingleSpacedVectorList()) return false;
1101    return VectorList.Count == 4;
1102  }
1103
1104  bool isVecListDPairSpaced() const {
1105    if (!isSingleSpacedVectorList()) return false;
1106    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1107              .contains(VectorList.RegNum));
1108  }
1109
1110  bool isVecListThreeQ() const {
1111    if (!isDoubleSpacedVectorList()) return false;
1112    return VectorList.Count == 3;
1113  }
1114
1115  bool isVecListFourQ() const {
1116    if (!isDoubleSpacedVectorList()) return false;
1117    return VectorList.Count == 4;
1118  }
1119
1120  bool isSingleSpacedVectorAllLanes() const {
1121    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1122  }
1123  bool isDoubleSpacedVectorAllLanes() const {
1124    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1125  }
1126  bool isVecListOneDAllLanes() const {
1127    if (!isSingleSpacedVectorAllLanes()) return false;
1128    return VectorList.Count == 1;
1129  }
1130
1131  bool isVecListDPairAllLanes() const {
1132    if (!isSingleSpacedVectorAllLanes()) return false;
1133    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1134              .contains(VectorList.RegNum));
1135  }
1136
1137  bool isVecListDPairSpacedAllLanes() const {
1138    if (!isDoubleSpacedVectorAllLanes()) return false;
1139    return VectorList.Count == 2;
1140  }
1141
1142  bool isVecListThreeDAllLanes() const {
1143    if (!isSingleSpacedVectorAllLanes()) return false;
1144    return VectorList.Count == 3;
1145  }
1146
1147  bool isVecListThreeQAllLanes() const {
1148    if (!isDoubleSpacedVectorAllLanes()) return false;
1149    return VectorList.Count == 3;
1150  }
1151
1152  bool isVecListFourDAllLanes() const {
1153    if (!isSingleSpacedVectorAllLanes()) return false;
1154    return VectorList.Count == 4;
1155  }
1156
1157  bool isVecListFourQAllLanes() const {
1158    if (!isDoubleSpacedVectorAllLanes()) return false;
1159    return VectorList.Count == 4;
1160  }
1161
1162  bool isSingleSpacedVectorIndexed() const {
1163    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1164  }
1165  bool isDoubleSpacedVectorIndexed() const {
1166    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1167  }
1168  bool isVecListOneDByteIndexed() const {
1169    if (!isSingleSpacedVectorIndexed()) return false;
1170    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1171  }
1172
1173  bool isVecListOneDHWordIndexed() const {
1174    if (!isSingleSpacedVectorIndexed()) return false;
1175    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1176  }
1177
1178  bool isVecListOneDWordIndexed() const {
1179    if (!isSingleSpacedVectorIndexed()) return false;
1180    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1181  }
1182
1183  bool isVecListTwoDByteIndexed() const {
1184    if (!isSingleSpacedVectorIndexed()) return false;
1185    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1186  }
1187
1188  bool isVecListTwoDHWordIndexed() const {
1189    if (!isSingleSpacedVectorIndexed()) return false;
1190    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1191  }
1192
1193  bool isVecListTwoQWordIndexed() const {
1194    if (!isDoubleSpacedVectorIndexed()) return false;
1195    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1196  }
1197
1198  bool isVecListTwoQHWordIndexed() const {
1199    if (!isDoubleSpacedVectorIndexed()) return false;
1200    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1201  }
1202
1203  bool isVecListTwoDWordIndexed() const {
1204    if (!isSingleSpacedVectorIndexed()) return false;
1205    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1206  }
1207
1208  bool isVecListThreeDByteIndexed() const {
1209    if (!isSingleSpacedVectorIndexed()) return false;
1210    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1211  }
1212
1213  bool isVecListThreeDHWordIndexed() const {
1214    if (!isSingleSpacedVectorIndexed()) return false;
1215    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1216  }
1217
1218  bool isVecListThreeQWordIndexed() const {
1219    if (!isDoubleSpacedVectorIndexed()) return false;
1220    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1221  }
1222
1223  bool isVecListThreeQHWordIndexed() const {
1224    if (!isDoubleSpacedVectorIndexed()) return false;
1225    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1226  }
1227
1228  bool isVecListThreeDWordIndexed() const {
1229    if (!isSingleSpacedVectorIndexed()) return false;
1230    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1231  }
1232
1233  bool isVecListFourDByteIndexed() const {
1234    if (!isSingleSpacedVectorIndexed()) return false;
1235    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1236  }
1237
1238  bool isVecListFourDHWordIndexed() const {
1239    if (!isSingleSpacedVectorIndexed()) return false;
1240    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1241  }
1242
1243  bool isVecListFourQWordIndexed() const {
1244    if (!isDoubleSpacedVectorIndexed()) return false;
1245    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1246  }
1247
1248  bool isVecListFourQHWordIndexed() const {
1249    if (!isDoubleSpacedVectorIndexed()) return false;
1250    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1251  }
1252
1253  bool isVecListFourDWordIndexed() const {
1254    if (!isSingleSpacedVectorIndexed()) return false;
1255    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1256  }
1257
1258  bool isVectorIndex8() const {
1259    if (Kind != k_VectorIndex) return false;
1260    return VectorIndex.Val < 8;
1261  }
1262  bool isVectorIndex16() const {
1263    if (Kind != k_VectorIndex) return false;
1264    return VectorIndex.Val < 4;
1265  }
1266  bool isVectorIndex32() const {
1267    if (Kind != k_VectorIndex) return false;
1268    return VectorIndex.Val < 2;
1269  }
1270
1271  bool isNEONi8splat() const {
1272    if (!isImm()) return false;
1273    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1274    // Must be a constant.
1275    if (!CE) return false;
1276    int64_t Value = CE->getValue();
1277    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1278    // value.
1279    return Value >= 0 && Value < 256;
1280  }
1281
1282  bool isNEONi16splat() const {
1283    if (!isImm()) return false;
1284    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1285    // Must be a constant.
1286    if (!CE) return false;
1287    int64_t Value = CE->getValue();
1288    // i16 value in the range [0,255] or [0x0100, 0xff00]
1289    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1290  }
1291
1292  bool isNEONi32splat() const {
1293    if (!isImm()) return false;
1294    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1295    // Must be a constant.
1296    if (!CE) return false;
1297    int64_t Value = CE->getValue();
1298    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1299    return (Value >= 0 && Value < 256) ||
1300      (Value >= 0x0100 && Value <= 0xff00) ||
1301      (Value >= 0x010000 && Value <= 0xff0000) ||
1302      (Value >= 0x01000000 && Value <= 0xff000000);
1303  }
1304
1305  bool isNEONi32vmov() const {
1306    if (!isImm()) return false;
1307    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1308    // Must be a constant.
1309    if (!CE) return false;
1310    int64_t Value = CE->getValue();
1311    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1312    // for VMOV/VMVN only, the 0xXXff and 0xXXffff forms are also accepted.
1313    return (Value >= 0 && Value < 256) ||
1314      (Value >= 0x0100 && Value <= 0xff00) ||
1315      (Value >= 0x010000 && Value <= 0xff0000) ||
1316      (Value >= 0x01000000 && Value <= 0xff000000) ||
1317      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1318      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1319  }
1320  bool isNEONi32vmovNeg() const {
1321    if (!isImm()) return false;
1322    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1323    // Must be a constant.
1324    if (!CE) return false;
1325    int64_t Value = ~CE->getValue();
1326    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1327    // for VMOV/VMVN only, the 0xXXff and 0xXXffff forms are also accepted.
1328    return (Value >= 0 && Value < 256) ||
1329      (Value >= 0x0100 && Value <= 0xff00) ||
1330      (Value >= 0x010000 && Value <= 0xff0000) ||
1331      (Value >= 0x01000000 && Value <= 0xff000000) ||
1332      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1333      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1334  }
1335
1336  bool isNEONi64splat() const {
1337    if (!isImm()) return false;
1338    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1339    // Must be a constant.
1340    if (!CE) return false;
1341    uint64_t Value = CE->getValue();
1342    // i64 value with each byte being either 0 or 0xff.
1343    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1344      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1345    return true;
1346  }
1347
1348  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1349    // Add as immediates when possible.  Null MCExpr = 0.
1350    if (Expr == 0)
1351      Inst.addOperand(MCOperand::CreateImm(0));
1352    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1353      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1354    else
1355      Inst.addOperand(MCOperand::CreateExpr(Expr));
1356  }
1357
1358  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1359    assert(N == 2 && "Invalid number of operands!");
1360    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1361    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1362    Inst.addOperand(MCOperand::CreateReg(RegNum));
1363  }
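  // For example (illustrative): "eq" adds the ARMCC::EQ immediate plus CPSR
  // as the condition register, while "al" (always) adds register 0 since no
  // flags are read.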
1364
1365  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1366    assert(N == 1 && "Invalid number of operands!");
1367    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1368  }
1369
1370  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1371    assert(N == 1 && "Invalid number of operands!");
1372    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1373  }
1374
1375  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1376    assert(N == 1 && "Invalid number of operands!");
1377    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1378  }
1379
1380  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1381    assert(N == 1 && "Invalid number of operands!");
1382    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1383  }
1384
1385  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1386    assert(N == 1 && "Invalid number of operands!");
1387    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1388  }
1389
1390  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1391    assert(N == 1 && "Invalid number of operands!");
1392    Inst.addOperand(MCOperand::CreateReg(getReg()));
1393  }
1394
1395  void addRegOperands(MCInst &Inst, unsigned N) const {
1396    assert(N == 1 && "Invalid number of operands!");
1397    Inst.addOperand(MCOperand::CreateReg(getReg()));
1398  }
1399
1400  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1401    assert(N == 3 && "Invalid number of operands!");
1402    assert(isRegShiftedReg() &&
1403           "addRegShiftedRegOperands() on non RegShiftedReg!");
1404    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1405    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1406    Inst.addOperand(MCOperand::CreateImm(
1407      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1408  }
1409
1410  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1411    assert(N == 2 && "Invalid number of operands!");
1412    assert(isRegShiftedImm() &&
1413           "addRegShiftedImmOperands() on non RegShiftedImm!");
1414    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1415    Inst.addOperand(MCOperand::CreateImm(
1416      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1417  }
1418
1419  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1420    assert(N == 1 && "Invalid number of operands!");
1421    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1422                                         ShifterImm.Imm));
1423  }
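  // For example (illustrative): "asr #16" yields (1 << 5) | 16 == 48, while
  // "lsl #16" yields plain 16, so bit 5 of the immediate distinguishes the
  // two shift forms.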
1424
1425  void addRegListOperands(MCInst &Inst, unsigned N) const {
1426    assert(N == 1 && "Invalid number of operands!");
1427    const SmallVectorImpl<unsigned> &RegList = getRegList();
1428    for (SmallVectorImpl<unsigned>::const_iterator
1429           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1430      Inst.addOperand(MCOperand::CreateReg(*I));
1431  }
1432
1433  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1434    addRegListOperands(Inst, N);
1435  }
1436
1437  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1438    addRegListOperands(Inst, N);
1439  }
1440
1441  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1442    assert(N == 1 && "Invalid number of operands!");
1443    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1444    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1445  }
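  // For example (illustrative): "ror #16" is encoded here as 16 >> 3 == 2;
  // the printer scales it back up for display.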
1446
1447  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1448    assert(N == 1 && "Invalid number of operands!");
1449    // Munge the lsb/width into a bitfield mask.
1450    unsigned lsb = Bitfield.LSB;
1451    unsigned width = Bitfield.Width;
1452    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1453    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1454                      (32 - (lsb + width)));
1455    Inst.addOperand(MCOperand::CreateImm(Mask));
1456  }
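  // Worked example (illustrative): lsb = 8, width = 8 gives
  // ~((0xffffffff >> 8) << 24 >> 16) == ~0x0000ff00 == 0xffff00ff, i.e. the
  // eight referenced bits [15:8] are clear and everything else is set.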
1457
1458  void addImmOperands(MCInst &Inst, unsigned N) const {
1459    assert(N == 1 && "Invalid number of operands!");
1460    addExpr(Inst, getImm());
1461  }
1462
1463  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1464    assert(N == 1 && "Invalid number of operands!");
1465    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1466    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1467  }
1468
1469  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1470    assert(N == 1 && "Invalid number of operands!");
1471    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1472    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1473  }
1474
1475  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1476    assert(N == 1 && "Invalid number of operands!");
1477    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1478    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1479    Inst.addOperand(MCOperand::CreateImm(Val));
1480  }
1481
1482  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1483    assert(N == 1 && "Invalid number of operands!");
1484    // FIXME: We really want to scale the value here, but the LDRD/STRD
1485    // instructions don't encode operands that way yet.
1486    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1487    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1488  }
1489
1490  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1491    assert(N == 1 && "Invalid number of operands!");
1492    // The immediate is scaled by four in the encoding and is stored
1493    // in the MCInst as such. Lop off the low two bits here.
1494    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1495    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1496  }
1497
1498  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1499    assert(N == 1 && "Invalid number of operands!");
1500    // The immediate is scaled by four in the encoding and is stored
1501    // in the MCInst as such. Lop off the low two bits here.
1502    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1503    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1504  }
1505
1506  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1507    assert(N == 1 && "Invalid number of operands!");
1508    // The constant encodes as the immediate-1, and we store in the instruction
1509    // the bits as encoded, so subtract off one here.
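    // e.g. #1 is stored as 0 and #16 as 15.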
1510    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1511    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1512  }
1513
1514  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1515    assert(N == 1 && "Invalid number of operands!");
1516    // The constant encodes as the immediate-1, and we store in the instruction
1517    // the bits as encoded, so subtract off one here.
1518    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1519    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1520  }
1521
1522  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1523    assert(N == 1 && "Invalid number of operands!");
1524    // The constant encodes as the immediate, except for 32, which encodes as
1525    // zero.
1526    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1527    unsigned Imm = CE->getValue();
1528    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1529  }
1530
1531  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1532    assert(N == 1 && "Invalid number of operands!");
1533    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1534    // the instruction as well.
1535    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1536    int Val = CE->getValue();
1537    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1538  }
1539
1540  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1541    assert(N == 1 && "Invalid number of operands!");
1542    // The operand is actually a t2_so_imm, but we have its bitwise
1543    // negation in the assembly source, so twiddle it here.
1544    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1545    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1546  }
1547
1548  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1549    assert(N == 1 && "Invalid number of operands!");
1550    // The operand is actually a t2_so_imm, but we have its
1551    // negation in the assembly source, so twiddle it here.
1552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1553    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1554  }
1555
1556  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1557    assert(N == 1 && "Invalid number of operands!");
1558    // The operand is actually a so_imm, but we have its bitwise
1559    // negation in the assembly source, so twiddle it here.
1560    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1561    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1562  }
1563
1564  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1565    assert(N == 1 && "Invalid number of operands!");
1566    // The operand is actually a so_imm, but we have its
1567    // negation in the assembly source, so twiddle it here.
1568    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1569    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1570  }
1571
1572  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1573    assert(N == 1 && "Invalid number of operands!");
1574    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1575  }
1576
1577  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1578    assert(N == 1 && "Invalid number of operands!");
1579    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1580  }
1581
1582  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1583    assert(N == 1 && "Invalid number of operands!");
1584    int32_t Imm = Memory.OffsetImm->getValue();
1585    // FIXME: Handle #-0
1586    if (Imm == INT32_MIN) Imm = 0;
1587    Inst.addOperand(MCOperand::CreateImm(Imm));
1588  }
1589
1590  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1591    assert(N == 2 && "Invalid number of operands!");
1592    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1593    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1594  }
1595
1596  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1597    assert(N == 3 && "Invalid number of operands!");
1598    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1599    if (!Memory.OffsetRegNum) {
1600      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1601      // Special case for #-0
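      // (#-0 arrives here as INT32_MIN, so AddSub above is already 'sub').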
1602      if (Val == INT32_MIN) Val = 0;
1603      if (Val < 0) Val = -Val;
1604      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1605    } else {
1606      // For register offset, we encode the shift type and negation flag
1607      // here.
1608      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1609                              Memory.ShiftImm, Memory.ShiftType);
1610    }
1611    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1612    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1613    Inst.addOperand(MCOperand::CreateImm(Val));
1614  }
1615
1616  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1617    assert(N == 2 && "Invalid number of operands!");
1618    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1619    assert(CE && "non-constant AM2OffsetImm operand!");
1620    int32_t Val = CE->getValue();
1621    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1622    // Special case for #-0
1623    if (Val == INT32_MIN) Val = 0;
1624    if (Val < 0) Val = -Val;
1625    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1626    Inst.addOperand(MCOperand::CreateReg(0));
1627    Inst.addOperand(MCOperand::CreateImm(Val));
1628  }
1629
1630  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1631    assert(N == 3 && "Invalid number of operands!");
1632    // If we have an immediate that's not a constant, treat it as a label
1633    // reference needing a fixup. If it is a constant, it's something else
1634    // and we reject it.
1635    if (isImm()) {
1636      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1637      Inst.addOperand(MCOperand::CreateReg(0));
1638      Inst.addOperand(MCOperand::CreateImm(0));
1639      return;
1640    }
1641
1642    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1643    if (!Memory.OffsetRegNum) {
1644      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1645      // Special case for #-0
1646      if (Val == INT32_MIN) Val = 0;
1647      if (Val < 0) Val = -Val;
1648      Val = ARM_AM::getAM3Opc(AddSub, Val);
1649    } else {
1650      // For register offset, we encode the shift type and negation flag
1651      // here.
1652      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1653    }
1654    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1655    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1656    Inst.addOperand(MCOperand::CreateImm(Val));
1657  }
1658
1659  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1660    assert(N == 2 && "Invalid number of operands!");
1661    if (Kind == k_PostIndexRegister) {
1662      int32_t Val =
1663        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1664      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1665      Inst.addOperand(MCOperand::CreateImm(Val));
1666      return;
1667    }
1668
1669    // Constant offset.
1670    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1671    int32_t Val = CE->getValue();
1672    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1673    // Special case for #-0
1674    if (Val == INT32_MIN) Val = 0;
1675    if (Val < 0) Val = -Val;
1676    Val = ARM_AM::getAM3Opc(AddSub, Val);
1677    Inst.addOperand(MCOperand::CreateReg(0));
1678    Inst.addOperand(MCOperand::CreateImm(Val));
1679  }
1680
1681  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1682    assert(N == 2 && "Invalid number of operands!");
1683    // If we have an immediate that's not a constant, treat it as a label
1684    // reference needing a fixup. If it is a constant, it's something else
1685    // and we reject it.
1686    if (isImm()) {
1687      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1688      Inst.addOperand(MCOperand::CreateImm(0));
1689      return;
1690    }
1691
1692    // The lower two bits are always zero and as such are not encoded.
1693    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1694    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1695    // Special case for #-0
1696    if (Val == INT32_MIN) Val = 0;
1697    if (Val < 0) Val = -Val;
1698    Val = ARM_AM::getAM5Opc(AddSub, Val);
1699    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1700    Inst.addOperand(MCOperand::CreateImm(Val));
1701  }
1702
1703  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1704    assert(N == 2 && "Invalid number of operands!");
1705    // If we have an immediate that's not a constant, treat it as a label
1706    // reference needing a fixup. If it is a constant, it's something else
1707    // and we reject it.
1708    if (isImm()) {
1709      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1710      Inst.addOperand(MCOperand::CreateImm(0));
1711      return;
1712    }
1713
1714    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1715    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1716    Inst.addOperand(MCOperand::CreateImm(Val));
1717  }
1718
1719  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1720    assert(N == 2 && "Invalid number of operands!");
1721    // The lower two bits are always zero and as such are not encoded.
1722    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1723    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1724    Inst.addOperand(MCOperand::CreateImm(Val));
1725  }
1726
1727  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1728    assert(N == 2 && "Invalid number of operands!");
1729    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1730    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1731    Inst.addOperand(MCOperand::CreateImm(Val));
1732  }
1733
1734  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1735    addMemImm8OffsetOperands(Inst, N);
1736  }
1737
1738  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1739    addMemImm8OffsetOperands(Inst, N);
1740  }
1741
1742  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1743    assert(N == 2 && "Invalid number of operands!");
1744    // If this is an immediate, it's a label reference.
1745    if (isImm()) {
1746      addExpr(Inst, getImm());
1747      Inst.addOperand(MCOperand::CreateImm(0));
1748      return;
1749    }
1750
1751    // Otherwise, it's a normal memory reg+offset.
1752    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1753    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1754    Inst.addOperand(MCOperand::CreateImm(Val));
1755  }
1756
1757  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1758    assert(N == 2 && "Invalid number of operands!");
1759    // If this is an immediate, it's a label reference.
1760    if (isImm()) {
1761      addExpr(Inst, getImm());
1762      Inst.addOperand(MCOperand::CreateImm(0));
1763      return;
1764    }
1765
1766    // Otherwise, it's a normal memory reg+offset.
1767    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1768    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1769    Inst.addOperand(MCOperand::CreateImm(Val));
1770  }
1771
1772  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1773    assert(N == 2 && "Invalid number of operands!");
1774    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1775    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1776  }
1777
1778  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1779    assert(N == 2 && "Invalid number of operands!");
1780    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1781    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1782  }
1783
1784  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1785    assert(N == 3 && "Invalid number of operands!");
1786    unsigned Val =
1787      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1788                        Memory.ShiftImm, Memory.ShiftType);
1789    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1790    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1791    Inst.addOperand(MCOperand::CreateImm(Val));
1792  }
1793
1794  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1795    assert(N == 3 && "Invalid number of operands!");
1796    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1797    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1798    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1799  }
1800
1801  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1802    assert(N == 2 && "Invalid number of operands!");
1803    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1804    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1805  }
1806
1807  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1808    assert(N == 2 && "Invalid number of operands!");
1809    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1810    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1811    Inst.addOperand(MCOperand::CreateImm(Val));
1812  }
1813
1814  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1815    assert(N == 2 && "Invalid number of operands!");
1816    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1817    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1818    Inst.addOperand(MCOperand::CreateImm(Val));
1819  }
1820
1821  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1822    assert(N == 2 && "Invalid number of operands!");
1823    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1824    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1825    Inst.addOperand(MCOperand::CreateImm(Val));
1826  }
1827
1828  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1829    assert(N == 2 && "Invalid number of operands!");
1830    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1831    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1832    Inst.addOperand(MCOperand::CreateImm(Val));
1833  }
1834
1835  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1836    assert(N == 1 && "Invalid number of operands!");
1837    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1838    assert(CE && "non-constant post-idx-imm8 operand!");
1839    int Imm = CE->getValue();
1840    bool isAdd = Imm >= 0;
1841    if (Imm == INT32_MIN) Imm = 0;
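    // Low 8 bits hold the magnitude, bit 8 the add/sub flag,
    // e.g. #4 becomes 0x104 and #-4 becomes 0x004 (illustrative).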
1842    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1843    Inst.addOperand(MCOperand::CreateImm(Imm));
1844  }
1845
1846  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1847    assert(N == 1 && "Invalid number of operands!");
1848    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1849    assert(CE && "non-constant post-idx-imm8s4 operand!");
1850    int Imm = CE->getValue();
1851    bool isAdd = Imm >= 0;
1852    if (Imm == INT32_MIN) Imm = 0;
1853    // Immediate is scaled by 4.
1854    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1855    Inst.addOperand(MCOperand::CreateImm(Imm));
1856  }
1857
1858  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1859    assert(N == 2 && "Invalid number of operands!");
1860    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1861    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1862  }
1863
1864  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1865    assert(N == 2 && "Invalid number of operands!");
1866    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1867    // The sign, shift type, and shift amount are encoded in a single operand
1868    // using the AM2 encoding helpers.
1869    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1870    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1871                                     PostIdxReg.ShiftTy);
1872    Inst.addOperand(MCOperand::CreateImm(Imm));
1873  }
1874
1875  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1876    assert(N == 1 && "Invalid number of operands!");
1877    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1878  }
1879
1880  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1881    assert(N == 1 && "Invalid number of operands!");
1882    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1883  }
1884
1885  void addVecListOperands(MCInst &Inst, unsigned N) const {
1886    assert(N == 1 && "Invalid number of operands!");
1887    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1888  }
1889
1890  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1891    assert(N == 2 && "Invalid number of operands!");
1892    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1893    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1894  }
1895
1896  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1897    assert(N == 1 && "Invalid number of operands!");
1898    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1899  }
1900
1901  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1902    assert(N == 1 && "Invalid number of operands!");
1903    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1904  }
1905
1906  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1907    assert(N == 1 && "Invalid number of operands!");
1908    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1909  }
1910
1911  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1912    assert(N == 1 && "Invalid number of operands!");
1913    // The immediate encodes the type of constant as well as the value.
1914    // Mask in that this is an i8 splat.
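    // e.g. a value of 0x42 is emitted as 0xe42.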
1915    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1916    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1917  }
1918
1919  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1920    assert(N == 1 && "Invalid number of operands!");
1921    // The immediate encodes the type of constant as well as the value.
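    // A splat of the high byte moves down and takes the 0xa00 marker,
    // e.g. 0x1200 becomes 0xa12, while 0x0012 becomes 0x812 (illustrative).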
1922    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1923    unsigned Value = CE->getValue();
1924    if (Value >= 256)
1925      Value = (Value >> 8) | 0xa00;
1926    else
1927      Value |= 0x800;
1928    Inst.addOperand(MCOperand::CreateImm(Value));
1929  }
1930
1931  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1932    assert(N == 1 && "Invalid number of operands!");
1933    // The immediate encodes the type of constant as well as the value.
1934    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1935    unsigned Value = CE->getValue();
1936    if (Value >= 256 && Value <= 0xff00)
1937      Value = (Value >> 8) | 0x200;
1938    else if (Value > 0xffff && Value <= 0xff0000)
1939      Value = (Value >> 16) | 0x400;
1940    else if (Value > 0xffffff)
1941      Value = (Value >> 24) | 0x600;
1942    Inst.addOperand(MCOperand::CreateImm(Value));
1943  }
1944
1945  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1946    assert(N == 1 && "Invalid number of operands!");
1947    // The immediate encodes the type of constant as well as the value.
1948    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1949    unsigned Value = CE->getValue();
1950    if (Value >= 256 && Value <= 0xffff)
1951      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1952    else if (Value > 0xffff && Value <= 0xffffff)
1953      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1954    else if (Value > 0xffffff)
1955      Value = (Value >> 24) | 0x600;
1956    Inst.addOperand(MCOperand::CreateImm(Value));
1957  }
1958
1959  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1960    assert(N == 1 && "Invalid number of operands!");
1961    // The immediate encodes the type of constant as well as the value.
1962    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1963    unsigned Value = ~CE->getValue();
1964    if (Value >= 256 && Value <= 0xffff)
1965      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1966    else if (Value > 0xffff && Value <= 0xffffff)
1967      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1968    else if (Value > 0xffffff)
1969      Value = (Value >> 24) | 0x600;
1970    Inst.addOperand(MCOperand::CreateImm(Value));
1971  }
1972
1973  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1974    assert(N == 1 && "Invalid number of operands!");
1975    // The immediate encodes the type of constant as well as the value.
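    // Each byte of a valid i64 splat is 0x00 or 0xff; collapsing each byte
    // to its low bit recovers the 8-bit field, e.g. 0x00ff00ff00ff00ff -> 0x55.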
1976    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1977    uint64_t Value = CE->getValue();
1978    unsigned Imm = 0;
1979    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1980      Imm |= (Value & 1) << i;
1981    }
1982    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1983  }
1984
1985  virtual void print(raw_ostream &OS) const;
1986
1987  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1988    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1989    Op->ITMask.Mask = Mask;
1990    Op->StartLoc = S;
1991    Op->EndLoc = S;
1992    return Op;
1993  }
1994
1995  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1996    ARMOperand *Op = new ARMOperand(k_CondCode);
1997    Op->CC.Val = CC;
1998    Op->StartLoc = S;
1999    Op->EndLoc = S;
2000    return Op;
2001  }
2002
2003  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2004    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2005    Op->Cop.Val = CopVal;
2006    Op->StartLoc = S;
2007    Op->EndLoc = S;
2008    return Op;
2009  }
2010
2011  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2012    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2013    Op->Cop.Val = CopVal;
2014    Op->StartLoc = S;
2015    Op->EndLoc = S;
2016    return Op;
2017  }
2018
2019  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2020    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2021    Op->Cop.Val = Val;
2022    Op->StartLoc = S;
2023    Op->EndLoc = E;
2024    return Op;
2025  }
2026
2027  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2028    ARMOperand *Op = new ARMOperand(k_CCOut);
2029    Op->Reg.RegNum = RegNum;
2030    Op->StartLoc = S;
2031    Op->EndLoc = S;
2032    return Op;
2033  }
2034
2035  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2036    ARMOperand *Op = new ARMOperand(k_Token);
2037    Op->Tok.Data = Str.data();
2038    Op->Tok.Length = Str.size();
2039    Op->StartLoc = S;
2040    Op->EndLoc = S;
2041    return Op;
2042  }
2043
2044  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2045    ARMOperand *Op = new ARMOperand(k_Register);
2046    Op->Reg.RegNum = RegNum;
2047    Op->StartLoc = S;
2048    Op->EndLoc = E;
2049    return Op;
2050  }
2051
2052  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2053                                           unsigned SrcReg,
2054                                           unsigned ShiftReg,
2055                                           unsigned ShiftImm,
2056                                           SMLoc S, SMLoc E) {
2057    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2058    Op->RegShiftedReg.ShiftTy = ShTy;
2059    Op->RegShiftedReg.SrcReg = SrcReg;
2060    Op->RegShiftedReg.ShiftReg = ShiftReg;
2061    Op->RegShiftedReg.ShiftImm = ShiftImm;
2062    Op->StartLoc = S;
2063    Op->EndLoc = E;
2064    return Op;
2065  }
2066
2067  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2068                                            unsigned SrcReg,
2069                                            unsigned ShiftImm,
2070                                            SMLoc S, SMLoc E) {
2071    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2072    Op->RegShiftedImm.ShiftTy = ShTy;
2073    Op->RegShiftedImm.SrcReg = SrcReg;
2074    Op->RegShiftedImm.ShiftImm = ShiftImm;
2075    Op->StartLoc = S;
2076    Op->EndLoc = E;
2077    return Op;
2078  }
2079
2080  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2081                                   SMLoc S, SMLoc E) {
2082    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2083    Op->ShifterImm.isASR = isASR;
2084    Op->ShifterImm.Imm = Imm;
2085    Op->StartLoc = S;
2086    Op->EndLoc = E;
2087    return Op;
2088  }
2089
2090  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2091    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2092    Op->RotImm.Imm = Imm;
2093    Op->StartLoc = S;
2094    Op->EndLoc = E;
2095    return Op;
2096  }
2097
2098  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2099                                    SMLoc S, SMLoc E) {
2100    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2101    Op->Bitfield.LSB = LSB;
2102    Op->Bitfield.Width = Width;
2103    Op->StartLoc = S;
2104    Op->EndLoc = E;
2105    return Op;
2106  }
2107
2108  static ARMOperand *
2109  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2110                SMLoc StartLoc, SMLoc EndLoc) {
2111    KindTy Kind = k_RegisterList;
2112
2113    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2114      Kind = k_DPRRegisterList;
2115    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2116             contains(Regs.front().first))
2117      Kind = k_SPRRegisterList;
2118
2119    ARMOperand *Op = new ARMOperand(Kind);
2120    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2121           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2122      Op->Registers.push_back(I->first);
2123    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2124    Op->StartLoc = StartLoc;
2125    Op->EndLoc = EndLoc;
2126    return Op;
2127  }
2128
2129  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2130                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2131    ARMOperand *Op = new ARMOperand(k_VectorList);
2132    Op->VectorList.RegNum = RegNum;
2133    Op->VectorList.Count = Count;
2134    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2135    Op->StartLoc = S;
2136    Op->EndLoc = E;
2137    return Op;
2138  }
2139
2140  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2141                                              bool isDoubleSpaced,
2142                                              SMLoc S, SMLoc E) {
2143    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2144    Op->VectorList.RegNum = RegNum;
2145    Op->VectorList.Count = Count;
2146    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2147    Op->StartLoc = S;
2148    Op->EndLoc = E;
2149    return Op;
2150  }
2151
2152  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2153                                             unsigned Index,
2154                                             bool isDoubleSpaced,
2155                                             SMLoc S, SMLoc E) {
2156    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2157    Op->VectorList.RegNum = RegNum;
2158    Op->VectorList.Count = Count;
2159    Op->VectorList.LaneIndex = Index;
2160    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2161    Op->StartLoc = S;
2162    Op->EndLoc = E;
2163    return Op;
2164  }
2165
2166  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2167                                       MCContext &Ctx) {
2168    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2169    Op->VectorIndex.Val = Idx;
2170    Op->StartLoc = S;
2171    Op->EndLoc = E;
2172    return Op;
2173  }
2174
2175  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2176    ARMOperand *Op = new ARMOperand(k_Immediate);
2177    Op->Imm.Val = Val;
2178    Op->StartLoc = S;
2179    Op->EndLoc = E;
2180    return Op;
2181  }
2182
2183  static ARMOperand *CreateMem(unsigned BaseRegNum,
2184                               const MCConstantExpr *OffsetImm,
2185                               unsigned OffsetRegNum,
2186                               ARM_AM::ShiftOpc ShiftType,
2187                               unsigned ShiftImm,
2188                               unsigned Alignment,
2189                               bool isNegative,
2190                               SMLoc S, SMLoc E) {
2191    ARMOperand *Op = new ARMOperand(k_Memory);
2192    Op->Memory.BaseRegNum = BaseRegNum;
2193    Op->Memory.OffsetImm = OffsetImm;
2194    Op->Memory.OffsetRegNum = OffsetRegNum;
2195    Op->Memory.ShiftType = ShiftType;
2196    Op->Memory.ShiftImm = ShiftImm;
2197    Op->Memory.Alignment = Alignment;
2198    Op->Memory.isNegative = isNegative;
2199    Op->StartLoc = S;
2200    Op->EndLoc = E;
2201    return Op;
2202  }
2203
2204  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2205                                      ARM_AM::ShiftOpc ShiftTy,
2206                                      unsigned ShiftImm,
2207                                      SMLoc S, SMLoc E) {
2208    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2209    Op->PostIdxReg.RegNum = RegNum;
2210    Op->PostIdxReg.isAdd = isAdd;
2211    Op->PostIdxReg.ShiftTy = ShiftTy;
2212    Op->PostIdxReg.ShiftImm = ShiftImm;
2213    Op->StartLoc = S;
2214    Op->EndLoc = E;
2215    return Op;
2216  }
2217
2218  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2219    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2220    Op->MBOpt.Val = Opt;
2221    Op->StartLoc = S;
2222    Op->EndLoc = S;
2223    return Op;
2224  }
2225
2226  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2227    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2228    Op->IFlags.Val = IFlags;
2229    Op->StartLoc = S;
2230    Op->EndLoc = S;
2231    return Op;
2232  }
2233
2234  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2235    ARMOperand *Op = new ARMOperand(k_MSRMask);
2236    Op->MMask.Val = MMask;
2237    Op->StartLoc = S;
2238    Op->EndLoc = S;
2239    return Op;
2240  }
2241};
2242
2243} // end anonymous namespace.
2244
2245void ARMOperand::print(raw_ostream &OS) const {
2246  switch (Kind) {
2247  case k_CondCode:
2248    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2249    break;
2250  case k_CCOut:
2251    OS << "<ccout " << getReg() << ">";
2252    break;
2253  case k_ITCondMask: {
2254    static const char *MaskStr[] = {
2255      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2256      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2257    };
2258    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2259    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2260    break;
2261  }
2262  case k_CoprocNum:
2263    OS << "<coprocessor number: " << getCoproc() << ">";
2264    break;
2265  case k_CoprocReg:
2266    OS << "<coprocessor register: " << getCoproc() << ">";
2267    break;
2268  case k_CoprocOption:
2269    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2270    break;
2271  case k_MSRMask:
2272    OS << "<mask: " << getMSRMask() << ">";
2273    break;
2274  case k_Immediate:
2275    getImm()->print(OS);
2276    break;
2277  case k_MemBarrierOpt:
2278    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2279    break;
2280  case k_Memory:
2281    OS << "<memory "
2282       << "base:" << Memory.BaseRegNum;
2283    OS << ">";
2284    break;
2285  case k_PostIndexRegister:
2286    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2287       << PostIdxReg.RegNum;
2288    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2289      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2290         << PostIdxReg.ShiftImm;
2291    OS << ">";
2292    break;
2293  case k_ProcIFlags: {
2294    OS << "<ARM_PROC::";
2295    unsigned IFlags = getProcIFlags();
2296    for (int i=2; i >= 0; --i)
2297      if (IFlags & (1 << i))
2298        OS << ARM_PROC::IFlagsToString(1 << i);
2299    OS << ">";
2300    break;
2301  }
2302  case k_Register:
2303    OS << "<register " << getReg() << ">";
2304    break;
2305  case k_ShifterImmediate:
2306    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2307       << " #" << ShifterImm.Imm << ">";
2308    break;
2309  case k_ShiftedRegister:
2310    OS << "<so_reg_reg "
2311       << RegShiftedReg.SrcReg << " "
2312       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2313       << " " << RegShiftedReg.ShiftReg << ">";
2314    break;
2315  case k_ShiftedImmediate:
2316    OS << "<so_reg_imm "
2317       << RegShiftedImm.SrcReg << " "
2318       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2319       << " #" << RegShiftedImm.ShiftImm << ">";
2320    break;
2321  case k_RotateImmediate:
2322    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2323    break;
2324  case k_BitfieldDescriptor:
2325    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2326       << ", width: " << Bitfield.Width << ">";
2327    break;
2328  case k_RegisterList:
2329  case k_DPRRegisterList:
2330  case k_SPRRegisterList: {
2331    OS << "<register_list ";
2332
2333    const SmallVectorImpl<unsigned> &RegList = getRegList();
2334    for (SmallVectorImpl<unsigned>::const_iterator
2335           I = RegList.begin(), E = RegList.end(); I != E; ) {
2336      OS << *I;
2337      if (++I < E) OS << ", ";
2338    }
2339
2340    OS << ">";
2341    break;
2342  }
2343  case k_VectorList:
2344    OS << "<vector_list " << VectorList.Count << " * "
2345       << VectorList.RegNum << ">";
2346    break;
2347  case k_VectorListAllLanes:
2348    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2349       << VectorList.RegNum << ">";
2350    break;
2351  case k_VectorListIndexed:
2352    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2353       << VectorList.Count << " * " << VectorList.RegNum << ">";
2354    break;
2355  case k_Token:
2356    OS << "'" << getToken() << "'";
2357    break;
2358  case k_VectorIndex:
2359    OS << "<vectorindex " << getVectorIndex() << ">";
2360    break;
2361  }
2362}
2363
2364/// @name Auto-generated Match Functions
2365/// {
2366
2367static unsigned MatchRegisterName(StringRef Name);
2368
2369/// }
2370
2371bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2372                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2373  StartLoc = Parser.getTok().getLoc();
2374  RegNo = tryParseRegister();
2375  EndLoc = Parser.getTok().getLoc();
2376
2377  return (RegNo == (unsigned)-1);
2378}
2379
2380/// Try to parse a register name.  The token must be an Identifier when called,
2381/// and if it is a register name the token is eaten and the register number is
2382/// returned.  Otherwise return -1.
2383///
2384int ARMAsmParser::tryParseRegister() {
2385  const AsmToken &Tok = Parser.getTok();
2386  if (Tok.isNot(AsmToken::Identifier)) return -1;
2387
2388  std::string lowerCase = Tok.getString().lower();
2389  unsigned RegNum = MatchRegisterName(lowerCase);
2390  if (!RegNum) {
2391    RegNum = StringSwitch<unsigned>(lowerCase)
2392      .Case("r13", ARM::SP)
2393      .Case("r14", ARM::LR)
2394      .Case("r15", ARM::PC)
2395      .Case("ip", ARM::R12)
2396      // Additional register name aliases for 'gas' compatibility.
2397      .Case("a1", ARM::R0)
2398      .Case("a2", ARM::R1)
2399      .Case("a3", ARM::R2)
2400      .Case("a4", ARM::R3)
2401      .Case("v1", ARM::R4)
2402      .Case("v2", ARM::R5)
2403      .Case("v3", ARM::R6)
2404      .Case("v4", ARM::R7)
2405      .Case("v5", ARM::R8)
2406      .Case("v6", ARM::R9)
2407      .Case("v7", ARM::R10)
2408      .Case("v8", ARM::R11)
2409      .Case("sb", ARM::R9)
2410      .Case("sl", ARM::R10)
2411      .Case("fp", ARM::R11)
2412      .Default(0);
2413  }
2414  if (!RegNum) {
2415    // Check for aliases registered via .req. Canonicalize to lower case.
2416    // That's more consistent since register names are case insensitive, and
2417    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2418    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2419    // If no match, return failure.
2420    if (Entry == RegisterReqs.end())
2421      return -1;
2422    Parser.Lex(); // Eat identifier token.
2423    return Entry->getValue();
2424  }
2425
2426  Parser.Lex(); // Eat identifier token.
2427
2428  return RegNum;
2429}
2430
2431// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2432// If a recoverable error occurs, return 1. If an irrecoverable error
2433// occurs, return -1. An irrecoverable error is one where tokens have been
2434// consumed in the process of trying to parse the shifter (i.e., when it is
2435// indeed a shifter operand, but malformed).
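// For example, in "r1, lsl #2" or "r1, lsl r3" the 'r1' operand has already
// been parsed and the current token is the shift operator.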
2436int ARMAsmParser::tryParseShiftRegister(
2437                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2438  SMLoc S = Parser.getTok().getLoc();
2439  const AsmToken &Tok = Parser.getTok();
2440  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2441
2442  std::string lowerCase = Tok.getString().lower();
2443  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2444      .Case("asl", ARM_AM::lsl)
2445      .Case("lsl", ARM_AM::lsl)
2446      .Case("lsr", ARM_AM::lsr)
2447      .Case("asr", ARM_AM::asr)
2448      .Case("ror", ARM_AM::ror)
2449      .Case("rrx", ARM_AM::rrx)
2450      .Default(ARM_AM::no_shift);
2451
2452  if (ShiftTy == ARM_AM::no_shift)
2453    return 1;
2454
2455  Parser.Lex(); // Eat the operator.
2456
2457  // The source register for the shift has already been added to the
2458  // operand list, so we need to pop it off and combine it into the shifted
2459  // register operand instead.
2460  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2461  if (!PrevOp->isReg())
2462    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2463  int SrcReg = PrevOp->getReg();
2464  int64_t Imm = 0;
2465  int ShiftReg = 0;
2466  if (ShiftTy == ARM_AM::rrx) {
2467    // RRX doesn't have an explicit shift amount. The encoder expects
2468    // the shift register to be the same as the source register. Seems odd,
2469    // but OK.
2470    ShiftReg = SrcReg;
2471  } else {
2472    // Figure out if this is shifted by a constant or a register (for non-RRX).
2473    if (Parser.getTok().is(AsmToken::Hash) ||
2474        Parser.getTok().is(AsmToken::Dollar)) {
2475      Parser.Lex(); // Eat hash.
2476      SMLoc ImmLoc = Parser.getTok().getLoc();
2477      const MCExpr *ShiftExpr = 0;
2478      if (getParser().ParseExpression(ShiftExpr)) {
2479        Error(ImmLoc, "invalid immediate shift value");
2480        return -1;
2481      }
2482      // The expression must be evaluatable as an immediate.
2483      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2484      if (!CE) {
2485        Error(ImmLoc, "invalid immediate shift value");
2486        return -1;
2487      }
2488      // Range check the immediate.
2489      // lsl, ror: 0 <= imm <= 31
2490      // lsr, asr: 0 <= imm <= 32
2491      Imm = CE->getValue();
2492      if (Imm < 0 ||
2493          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2494          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2495        Error(ImmLoc, "immediate shift value out of range");
2496        return -1;
2497      }
2498      // A shift by zero is a nop. Always send it through as lsl.
2499      // ('as' compatibility)
2500      if (Imm == 0)
2501        ShiftTy = ARM_AM::lsl;
2502    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2503      ShiftReg = tryParseRegister();
2504      SMLoc L = Parser.getTok().getLoc();
2505      if (ShiftReg == -1) {
2506        Error (L, "expected immediate or register in shift operand");
2507        return -1;
2508      }
2509    } else {
2510      Error (Parser.getTok().getLoc(),
2511                    "expected immediate or register in shift operand");
2512      return -1;
2513    }
2514  }
2515
2516  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2517    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2518                                                         ShiftReg, Imm,
2519                                               S, Parser.getTok().getLoc()));
2520  else
2521    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2522                                               S, Parser.getTok().getLoc()));
2523
2524  return 0;
2525}
2526
2527
2528/// Try to parse a register name.  The token must be an Identifier when called.
2529/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2530/// if there is a "writeback". Returns 'true' if it's not a register.
2531///
2532/// TODO this is likely to change to allow different register types and or to
2533/// parse for a specific register type.
2534bool ARMAsmParser::
2535tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2536  SMLoc S = Parser.getTok().getLoc();
2537  int RegNo = tryParseRegister();
2538  if (RegNo == -1)
2539    return true;
2540
2541  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2542
2543  const AsmToken &ExclaimTok = Parser.getTok();
2544  if (ExclaimTok.is(AsmToken::Exclaim)) {
2545    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2546                                               ExclaimTok.getLoc()));
2547    Parser.Lex(); // Eat exclaim token
2548    return false;
2549  }
2550
2551  // Also check for an index operand. This is only legal for vector registers,
2552  // but that'll get caught OK in operand matching, so we don't need to
2553  // explicitly filter everything else out here.
2554  if (Parser.getTok().is(AsmToken::LBrac)) {
2555    SMLoc SIdx = Parser.getTok().getLoc();
2556    Parser.Lex(); // Eat left bracket token.
2557
2558    const MCExpr *ImmVal;
2559    if (getParser().ParseExpression(ImmVal))
2560      return true;
2561    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2562    if (!MCE)
2563      return TokError("immediate value expected for vector index");
2564
2565    SMLoc E = Parser.getTok().getLoc();
2566    if (Parser.getTok().isNot(AsmToken::RBrac))
2567      return Error(E, "']' expected");
2568
2569    Parser.Lex(); // Eat right bracket token.
2570
2571    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2572                                                     SIdx, E,
2573                                                     getContext()));
2574  }
2575
2576  return false;
2577}
2578
2579/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2580/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2581/// "c5", ...
2582static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2583  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2584  // but efficient.
2585  switch (Name.size()) {
2586  default: return -1;
2587  case 2:
2588    if (Name[0] != CoprocOp)
2589      return -1;
2590    switch (Name[1]) {
2591    default:  return -1;
2592    case '0': return 0;
2593    case '1': return 1;
2594    case '2': return 2;
2595    case '3': return 3;
2596    case '4': return 4;
2597    case '5': return 5;
2598    case '6': return 6;
2599    case '7': return 7;
2600    case '8': return 8;
2601    case '9': return 9;
2602    }
2603  case 3:
2604    if (Name[0] != CoprocOp || Name[1] != '1')
2605      return -1;
2606    switch (Name[2]) {
2607    default:  return -1;
2608    case '0': return 10;
2609    case '1': return 11;
2610    case '2': return 12;
2611    case '3': return 13;
2612    case '4': return 14;
2613    case '5': return 15;
2614    }
2615  }
2616}
2617
2618/// parseITCondCode - Try to parse a condition code for an IT instruction.
2619ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2620parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2621  SMLoc S = Parser.getTok().getLoc();
2622  const AsmToken &Tok = Parser.getTok();
2623  if (!Tok.is(AsmToken::Identifier))
2624    return MatchOperand_NoMatch;
2625  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2626    .Case("eq", ARMCC::EQ)
2627    .Case("ne", ARMCC::NE)
2628    .Case("hs", ARMCC::HS)
2629    .Case("cs", ARMCC::HS)
2630    .Case("lo", ARMCC::LO)
2631    .Case("cc", ARMCC::LO)
2632    .Case("mi", ARMCC::MI)
2633    .Case("pl", ARMCC::PL)
2634    .Case("vs", ARMCC::VS)
2635    .Case("vc", ARMCC::VC)
2636    .Case("hi", ARMCC::HI)
2637    .Case("ls", ARMCC::LS)
2638    .Case("ge", ARMCC::GE)
2639    .Case("lt", ARMCC::LT)
2640    .Case("gt", ARMCC::GT)
2641    .Case("le", ARMCC::LE)
2642    .Case("al", ARMCC::AL)
2643    .Default(~0U);
2644  if (CC == ~0U)
2645    return MatchOperand_NoMatch;
2646  Parser.Lex(); // Eat the token.
2647
2648  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2649
2650  return MatchOperand_Success;
2651}
2652
2653/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2654/// token must be an Identifier when called, and if it is a coprocessor
2655/// number, the token is eaten and the operand is added to the operand list.
2656ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2657parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2658  SMLoc S = Parser.getTok().getLoc();
2659  const AsmToken &Tok = Parser.getTok();
2660  if (Tok.isNot(AsmToken::Identifier))
2661    return MatchOperand_NoMatch;
2662
2663  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2664  if (Num == -1)
2665    return MatchOperand_NoMatch;
2666
2667  Parser.Lex(); // Eat identifier token.
2668  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2669  return MatchOperand_Success;
2670}
2671
2672/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2673/// token must be an Identifier when called, and if it is a coprocessor
2674/// register, the token is eaten and the operand is added to the operand list.
2675ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2676parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2677  SMLoc S = Parser.getTok().getLoc();
2678  const AsmToken &Tok = Parser.getTok();
2679  if (Tok.isNot(AsmToken::Identifier))
2680    return MatchOperand_NoMatch;
2681
2682  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2683  if (Reg == -1)
2684    return MatchOperand_NoMatch;
2685
2686  Parser.Lex(); // Eat identifier token.
2687  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2688  return MatchOperand_Success;
2689}
2690
2691/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2692/// coproc_option : '{' imm0_255 '}'
2693ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2694parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2695  SMLoc S = Parser.getTok().getLoc();
2696
2697  // If this isn't a '{', this isn't a coprocessor immediate operand.
2698  if (Parser.getTok().isNot(AsmToken::LCurly))
2699    return MatchOperand_NoMatch;
2700  Parser.Lex(); // Eat the '{'
2701
2702  const MCExpr *Expr;
2703  SMLoc Loc = Parser.getTok().getLoc();
2704  if (getParser().ParseExpression(Expr)) {
2705    Error(Loc, "illegal expression");
2706    return MatchOperand_ParseFail;
2707  }
2708  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2709  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2710    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2711    return MatchOperand_ParseFail;
2712  }
2713  int Val = CE->getValue();
2714
2715  // Check for and consume the closing '}'
2716  if (Parser.getTok().isNot(AsmToken::RCurly))
2717    return MatchOperand_ParseFail;
2718  SMLoc E = Parser.getTok().getLoc();
2719  Parser.Lex(); // Eat the '}'
2720
2721  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2722  return MatchOperand_Success;
2723}
2724
2725// For register list parsing, we need to map from raw GPR register numbering
2726// to the enumeration values. The enumeration values aren't sorted by
2727// register number due to our using "sp", "lr" and "pc" as canonical names.
2728static unsigned getNextRegister(unsigned Reg) {
2729  // If this is a GPR, we need to do it manually, otherwise we can rely
2730  // on the sort ordering of the enumeration since the other reg-classes
2731  // are sane.
2732  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2733    return Reg + 1;
2734  switch(Reg) {
2735  default: llvm_unreachable("Invalid GPR number!");
2736  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2737  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2738  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2739  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2740  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2741  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2742  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2743  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2744  }
2745}
2746
2747// Return the low-subreg of a given Q register.
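// e.g. Q3 maps to D6 (its high half being D7).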
2748static unsigned getDRegFromQReg(unsigned QReg) {
2749  switch (QReg) {
2750  default: llvm_unreachable("expected a Q register!");
2751  case ARM::Q0:  return ARM::D0;
2752  case ARM::Q1:  return ARM::D2;
2753  case ARM::Q2:  return ARM::D4;
2754  case ARM::Q3:  return ARM::D6;
2755  case ARM::Q4:  return ARM::D8;
2756  case ARM::Q5:  return ARM::D10;
2757  case ARM::Q6:  return ARM::D12;
2758  case ARM::Q7:  return ARM::D14;
2759  case ARM::Q8:  return ARM::D16;
2760  case ARM::Q9:  return ARM::D18;
2761  case ARM::Q10: return ARM::D20;
2762  case ARM::Q11: return ARM::D22;
2763  case ARM::Q12: return ARM::D24;
2764  case ARM::Q13: return ARM::D26;
2765  case ARM::Q14: return ARM::D28;
2766  case ARM::Q15: return ARM::D30;
2767  }
2768}
2769
2770/// Parse a register list.
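/// e.g. "{r0-r3, r12, lr}". Q registers are accepted and expanded into their
/// two D subregisters.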
2771bool ARMAsmParser::
2772parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2773  assert(Parser.getTok().is(AsmToken::LCurly) &&
2774         "Token is not a Left Curly Brace");
2775  SMLoc S = Parser.getTok().getLoc();
2776  Parser.Lex(); // Eat '{' token.
2777  SMLoc RegLoc = Parser.getTok().getLoc();
2778
2779  // Check the first register in the list to see what register class
2780  // this is a list of.
2781  int Reg = tryParseRegister();
2782  if (Reg == -1)
2783    return Error(RegLoc, "register expected");
2784
2785  // The reglist instructions have at most 16 registers, so reserve
2786  // space for that many.
2787  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2788
2789  // Allow Q regs and just interpret them as the two D sub-registers.
2790  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2791    Reg = getDRegFromQReg(Reg);
2792    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2793    ++Reg;
2794  }
2795  const MCRegisterClass *RC;
2796  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2797    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2798  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2799    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2800  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2801    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2802  else
2803    return Error(RegLoc, "invalid register in register list");
2804
2805  // Store the register.
2806  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2807
2808  // This starts immediately after the first register token in the list,
2809  // so we can see either a comma or a minus (range separator) as a legal
2810  // next token.
2811  while (Parser.getTok().is(AsmToken::Comma) ||
2812         Parser.getTok().is(AsmToken::Minus)) {
2813    if (Parser.getTok().is(AsmToken::Minus)) {
2814      Parser.Lex(); // Eat the minus.
2815      SMLoc EndLoc = Parser.getTok().getLoc();
2816      int EndReg = tryParseRegister();
2817      if (EndReg == -1)
2818        return Error(EndLoc, "register expected");
2819      // Allow Q regs and just interpret them as the two D sub-registers.
2820      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2821        EndReg = getDRegFromQReg(EndReg) + 1;
2822      // If the register is the same as the start reg, there's nothing
2823      // more to do.
2824      if (Reg == EndReg)
2825        continue;
2826      // The register must be in the same register class as the first.
2827      if (!RC->contains(EndReg))
2828        return Error(EndLoc, "invalid register in register list");
2829      // Ranges must go from low to high.
2830      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2831        return Error(EndLoc, "bad range in register list");
2832
2833      // Add all the registers in the range to the register list.
2834      while (Reg != EndReg) {
2835        Reg = getNextRegister(Reg);
2836        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2837      }
2838      continue;
2839    }
2840    Parser.Lex(); // Eat the comma.
2841    RegLoc = Parser.getTok().getLoc();
2842    int OldReg = Reg;
2843    const AsmToken RegTok = Parser.getTok();
2844    Reg = tryParseRegister();
2845    if (Reg == -1)
2846      return Error(RegLoc, "register expected");
2847    // Allow Q regs and just interpret them as the two D sub-registers.
2848    bool isQReg = false;
2849    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2850      Reg = getDRegFromQReg(Reg);
2851      isQReg = true;
2852    }
2853    // The register must be in the same register class as the first.
2854    if (!RC->contains(Reg))
2855      return Error(RegLoc, "invalid register in register list");
2856    // List must be monotonically increasing.
2857    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2858      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2859        Warning(RegLoc, "register list not in ascending order");
2860      else
2861        return Error(RegLoc, "register list not in ascending order");
2862    }
2863    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2864      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2865              ") in register list");
2866      continue;
2867    }
2868    // VFP register lists must also be contiguous.
2869    // It's OK to use the enumeration values directly here, as the
2870    // VFP register classes have the enum sorted properly.
2871    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2872        Reg != OldReg + 1)
2873      return Error(RegLoc, "non-contiguous register range");
2874    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2875    if (isQReg)
2876      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2877  }
2878
2879  SMLoc E = Parser.getTok().getLoc();
2880  if (Parser.getTok().isNot(AsmToken::RCurly))
2881    return Error(E, "'}' expected");
2882  Parser.Lex(); // Eat '}' token.
2883
2884  // Push the register list operand.
2885  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2886
2887  // The ARM system instruction variants for LDM/STM have a '^' token here.
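  // For illustration (assumed GNU-style syntax), "ldm sp!, {r0-r3, pc}^" is the
  // exception-return form that carries the trailing '^'.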
2888  if (Parser.getTok().is(AsmToken::Caret)) {
2889    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2890    Parser.Lex(); // Eat '^' token.
2891  }
2892
2893  return false;
2894}
2895
2896// Helper function to parse the lane index for vector lists.
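// For illustration (assumed GNU-style NEON syntax):
//   "d3[2]" gives IndexedLane with Index == 2,
//   "d3[]"  gives AllLanes,
//   a bare "d3" (no '[') gives NoLanes.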
2897ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2898parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2899  Index = 0; // Always return a defined index value.
2900  if (Parser.getTok().is(AsmToken::LBrac)) {
2901    Parser.Lex(); // Eat the '['.
2902    if (Parser.getTok().is(AsmToken::RBrac)) {
2903      // "Dn[]" is the 'all lanes' syntax.
2904      LaneKind = AllLanes;
2905      Parser.Lex(); // Eat the ']'.
2906      return MatchOperand_Success;
2907    }
2908
2909    // There's an optional '#' token here. Normally there wouldn't be, but
2910    // inline assembly puts one in, and it's friendly to accept that.
2911    if (Parser.getTok().is(AsmToken::Hash))
2912      Parser.Lex(); // Eat the '#'
2913
2914    const MCExpr *LaneIndex;
2915    SMLoc Loc = Parser.getTok().getLoc();
2916    if (getParser().ParseExpression(LaneIndex)) {
2917      Error(Loc, "illegal expression");
2918      return MatchOperand_ParseFail;
2919    }
2920    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2921    if (!CE) {
2922      Error(Loc, "lane index must be empty or an integer");
2923      return MatchOperand_ParseFail;
2924    }
2925    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2926      Error(Parser.getTok().getLoc(), "']' expected");
2927      return MatchOperand_ParseFail;
2928    }
2929    Parser.Lex(); // Eat the ']'.
2930    int64_t Val = CE->getValue();
2931
2932    // FIXME: Make this range check context sensitive for .8, .16, .32.
2933    if (Val < 0 || Val > 7) {
2934      Error(Parser.getTok().getLoc(), "lane index out of range");
2935      return MatchOperand_ParseFail;
2936    }
2937    Index = Val;
2938    LaneKind = IndexedLane;
2939    return MatchOperand_Success;
2940  }
2941  LaneKind = NoLanes;
2942  return MatchOperand_Success;
2943}
2944
2945// Parse a vector register list.
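// For illustration (assumed GNU-style NEON syntax), all of these are accepted:
//   "{d0, d1, d2, d3}" and "{d0-d3}"    (single-spaced lists)
//   "{d0, d2, d4, d6}"                  (double-spaced list)
//   "{d0[], d1[]}" and "{d0[1], d1[1]}" (all-lanes and per-lane forms)
//   "d0" or "q0"                        (bare register, gas extension)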
2946ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2947parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2948  VectorLaneTy LaneKind;
2949  unsigned LaneIndex;
2950  SMLoc S = Parser.getTok().getLoc();
2951  // As an extension (to match gas), support a plain D register or Q register
2952  // (without enclosing curly braces) as a single- or double-entry list,
2953  // respectively.
2954  if (Parser.getTok().is(AsmToken::Identifier)) {
2955    int Reg = tryParseRegister();
2956    if (Reg == -1)
2957      return MatchOperand_NoMatch;
2958    SMLoc E = Parser.getTok().getLoc();
2959    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2960      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2961      if (Res != MatchOperand_Success)
2962        return Res;
2963      switch (LaneKind) {
2964      case NoLanes:
2965        E = Parser.getTok().getLoc();
2966        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2967        break;
2968      case AllLanes:
2969        E = Parser.getTok().getLoc();
2970        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2971                                                                S, E));
2972        break;
2973      case IndexedLane:
2974        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2975                                                               LaneIndex,
2976                                                               false, S, E));
2977        break;
2978      }
2979      return MatchOperand_Success;
2980    }
2981    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2982      Reg = getDRegFromQReg(Reg);
2983      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2984      if (Res != MatchOperand_Success)
2985        return Res;
2986      switch (LaneKind) {
2987      case NoLanes:
2988        E = Parser.getTok().getLoc();
2989        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
2990                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
2991        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2992        break;
2993      case AllLanes:
2994        E = Parser.getTok().getLoc();
2995        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
2996                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
2997        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2998                                                                S, E));
2999        break;
3000      case IndexedLane:
3001        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3002                                                               LaneIndex,
3003                                                               false, S, E));
3004        break;
3005      }
3006      return MatchOperand_Success;
3007    }
3008    Error(S, "vector register expected");
3009    return MatchOperand_ParseFail;
3010  }
3011
3012  if (Parser.getTok().isNot(AsmToken::LCurly))
3013    return MatchOperand_NoMatch;
3014
3015  Parser.Lex(); // Eat '{' token.
3016  SMLoc RegLoc = Parser.getTok().getLoc();
3017
3018  int Reg = tryParseRegister();
3019  if (Reg == -1) {
3020    Error(RegLoc, "register expected");
3021    return MatchOperand_ParseFail;
3022  }
3023  unsigned Count = 1;
3024  int Spacing = 0;
3025  unsigned FirstReg = Reg;
3026  // The list is of D registers, but we also allow Q regs and just interpret
3027  // them as the two D sub-registers.
3028  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3029    FirstReg = Reg = getDRegFromQReg(Reg);
3030    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3031                 // it's ambiguous with four-register single spaced.
3032    ++Reg;
3033    ++Count;
3034  }
3035  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3036    return MatchOperand_ParseFail;
3037
3038  while (Parser.getTok().is(AsmToken::Comma) ||
3039         Parser.getTok().is(AsmToken::Minus)) {
3040    if (Parser.getTok().is(AsmToken::Minus)) {
3041      if (!Spacing)
3042        Spacing = 1; // Register range implies a single spaced list.
3043      else if (Spacing == 2) {
3044        Error(Parser.getTok().getLoc(),
3045              "sequential registers in double spaced list");
3046        return MatchOperand_ParseFail;
3047      }
3048      Parser.Lex(); // Eat the minus.
3049      SMLoc EndLoc = Parser.getTok().getLoc();
3050      int EndReg = tryParseRegister();
3051      if (EndReg == -1) {
3052        Error(EndLoc, "register expected");
3053        return MatchOperand_ParseFail;
3054      }
3055      // Allow Q regs and just interpret them as the two D sub-registers.
3056      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3057        EndReg = getDRegFromQReg(EndReg) + 1;
3058      // If the register is the same as the start reg, there's nothing
3059      // more to do.
3060      if (Reg == EndReg)
3061        continue;
3062      // The register must be in the same register class as the first.
3063      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3064        Error(EndLoc, "invalid register in register list");
3065        return MatchOperand_ParseFail;
3066      }
3067      // Ranges must go from low to high.
3068      if (Reg > EndReg) {
3069        Error(EndLoc, "bad range in register list");
3070        return MatchOperand_ParseFail;
3071      }
3072      // Parse the lane specifier if present.
3073      VectorLaneTy NextLaneKind;
3074      unsigned NextLaneIndex;
3075      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3076        return MatchOperand_ParseFail;
3077      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3078        Error(EndLoc, "mismatched lane index in register list");
3079        return MatchOperand_ParseFail;
3080      }
3081      EndLoc = Parser.getTok().getLoc();
3082
3083      // Add all the registers in the range to the register list.
3084      Count += EndReg - Reg;
3085      Reg = EndReg;
3086      continue;
3087    }
3088    Parser.Lex(); // Eat the comma.
3089    RegLoc = Parser.getTok().getLoc();
3090    int OldReg = Reg;
3091    Reg = tryParseRegister();
3092    if (Reg == -1) {
3093      Error(RegLoc, "register expected");
3094      return MatchOperand_ParseFail;
3095    }
3096    // Vector register lists must be contiguous.
3097    // It's OK to use the enumeration values directly here, as the
3098    // VFP register classes have the enum sorted properly.
3099    //
3100    // The list is of D registers, but we also allow Q regs and just interpret
3101    // them as the two D sub-registers.
3102    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3103      if (!Spacing)
3104        Spacing = 1; // Register range implies a single spaced list.
3105      else if (Spacing == 2) {
3106        Error(RegLoc,
3107              "invalid register in double-spaced list (must be 'D' register)");
3108        return MatchOperand_ParseFail;
3109      }
3110      Reg = getDRegFromQReg(Reg);
3111      if (Reg != OldReg + 1) {
3112        Error(RegLoc, "non-contiguous register range");
3113        return MatchOperand_ParseFail;
3114      }
3115      ++Reg;
3116      Count += 2;
3117      // Parse the lane specifier if present.
3118      VectorLaneTy NextLaneKind;
3119      unsigned NextLaneIndex;
3120      SMLoc EndLoc = Parser.getTok().getLoc();
3121      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3122        return MatchOperand_ParseFail;
3123      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3124        Error(EndLoc, "mismatched lane index in register list");
3125        return MatchOperand_ParseFail;
3126      }
3127      continue;
3128    }
3129    // Normal D register.
3130    // Figure out the register spacing (single or double) of the list if
3131    // we don't know it already.
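    // E.g., having seen "d0, d2" we infer double spacing (Spacing == 2), while
    // "d0, d1" gives single spacing (Spacing == 1).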
3132    if (!Spacing)
3133      Spacing = 1 + (Reg == OldReg + 2);
3134
3135    // Just check that it's contiguous and keep going.
3136    if (Reg != OldReg + Spacing) {
3137      Error(RegLoc, "non-contiguous register range");
3138      return MatchOperand_ParseFail;
3139    }
3140    ++Count;
3141    // Parse the lane specifier if present.
3142    VectorLaneTy NextLaneKind;
3143    unsigned NextLaneIndex;
3144    SMLoc EndLoc = Parser.getTok().getLoc();
3145    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3146      return MatchOperand_ParseFail;
3147    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3148      Error(EndLoc, "mismatched lane index in register list");
3149      return MatchOperand_ParseFail;
3150    }
3151  }
3152
3153  SMLoc E = Parser.getTok().getLoc();
3154  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3155    Error(E, "'}' expected");
3156    return MatchOperand_ParseFail;
3157  }
3158  Parser.Lex(); // Eat '}' token.
3159
3160  switch (LaneKind) {
3161  case NoLanes:
3162    // Two-register operands are converted here to the
3163    // composite register classes.
3164    if (Count == 2) {
3165      const MCRegisterClass *RC = (Spacing == 1) ?
3166        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3167        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3168      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3169    }
3170
3171    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3172                                                    (Spacing == 2), S, E));
3173    break;
3174  case AllLanes:
3175    // Two-register operands are converted here to the
3176    // composite register classes.
3177    if (Count == 2) {
3178      const MCRegisterClass *RC = (Spacing == 1) ?
3179        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3180        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3181      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3182    }
3183    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3184                                                            (Spacing == 2),
3185                                                            S, E));
3186    break;
3187  case IndexedLane:
3188    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3189                                                           LaneIndex,
3190                                                           (Spacing == 2),
3191                                                           S, E));
3192    break;
3193  }
3194  return MatchOperand_Success;
3195}
3196
3197/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
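/// For illustration, options such as "sy", "ish", "ishst", and "oshst" are
/// accepted here, e.g. "dmb ish" or "dsb sy" (assumed GNU-style syntax).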
3198ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3199parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3200  SMLoc S = Parser.getTok().getLoc();
3201  const AsmToken &Tok = Parser.getTok();
3202  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3203  StringRef OptStr = Tok.getString();
3204
3205  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3206    .Case("sy",    ARM_MB::SY)
3207    .Case("st",    ARM_MB::ST)
3208    .Case("sh",    ARM_MB::ISH)
3209    .Case("ish",   ARM_MB::ISH)
3210    .Case("shst",  ARM_MB::ISHST)
3211    .Case("ishst", ARM_MB::ISHST)
3212    .Case("nsh",   ARM_MB::NSH)
3213    .Case("un",    ARM_MB::NSH)
3214    .Case("nshst", ARM_MB::NSHST)
3215    .Case("unst",  ARM_MB::NSHST)
3216    .Case("osh",   ARM_MB::OSH)
3217    .Case("oshst", ARM_MB::OSHST)
3218    .Default(~0U);
3219
3220  if (Opt == ~0U)
3221    return MatchOperand_NoMatch;
3222
3223  Parser.Lex(); // Eat identifier token.
3224  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3225  return MatchOperand_Success;
3226}
3227
3228/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
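/// For illustration (assumed GNU-style syntax), "cpsid aif" sets all three
/// AIF bits, "cpsie if" sets I and F, and the string "none" sets no bits.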
3229ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3230parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3231  SMLoc S = Parser.getTok().getLoc();
3232  const AsmToken &Tok = Parser.getTok();
3233  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3234  StringRef IFlagsStr = Tok.getString();
3235
3236  // An iflags string of "none" is interpreted to mean that none of the AIF
3237  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3238  unsigned IFlags = 0;
3239  if (IFlagsStr != "none") {
3240    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3241      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3242        .Case("a", ARM_PROC::A)
3243        .Case("i", ARM_PROC::I)
3244        .Case("f", ARM_PROC::F)
3245        .Default(~0U);
3246
3247      // If some specific iflag is already set, it means that some letter is
3248      // present more than once, which is not acceptable.
3249      if (Flag == ~0U || (IFlags & Flag))
3250        return MatchOperand_NoMatch;
3251
3252      IFlags |= Flag;
3253    }
3254  }
3255
3256  Parser.Lex(); // Eat identifier token.
3257  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3258  return MatchOperand_Success;
3259}
3260
3261/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
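/// For illustration (assumed GNU-style syntax): "msr apsr_nzcvq, r0" and
/// "msr cpsr_fc, r1" on A/R-profile, or "msr primask, r0" on M-profile cores.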
3262ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3263parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3264  SMLoc S = Parser.getTok().getLoc();
3265  const AsmToken &Tok = Parser.getTok();
3266  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3267  StringRef Mask = Tok.getString();
3268
3269  if (isMClass()) {
3270    // See ARMv6-M 10.1.1
3271    std::string Name = Mask.lower();
3272    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3273      .Case("apsr", 0)
3274      .Case("iapsr", 1)
3275      .Case("eapsr", 2)
3276      .Case("xpsr", 3)
3277      .Case("ipsr", 5)
3278      .Case("epsr", 6)
3279      .Case("iepsr", 7)
3280      .Case("msp", 8)
3281      .Case("psp", 9)
3282      .Case("primask", 16)
3283      .Case("basepri", 17)
3284      .Case("basepri_max", 18)
3285      .Case("faultmask", 19)
3286      .Case("control", 20)
3287      .Default(~0U);
3288
3289    if (FlagsVal == ~0U)
3290      return MatchOperand_NoMatch;
3291
3292    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3293      // basepri, basepri_max and faultmask are only valid for v7-M.
3294      return MatchOperand_NoMatch;
3295
3296    Parser.Lex(); // Eat identifier token.
3297    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3298    return MatchOperand_Success;
3299  }
3300
3301  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3302  size_t Start = 0, Next = Mask.find('_');
3303  StringRef Flags = "";
3304  std::string SpecReg = Mask.slice(Start, Next).lower();
3305  if (Next != StringRef::npos)
3306    Flags = Mask.slice(Next+1, Mask.size());
3307
3308  // FlagsVal contains the complete mask:
3309  // 3-0: Mask
3310  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3311  unsigned FlagsVal = 0;
3312
3313  if (SpecReg == "apsr") {
3314    FlagsVal = StringSwitch<unsigned>(Flags)
3315    .Case("nzcvq",  0x8) // same as CPSR_f
3316    .Case("g",      0x4) // same as CPSR_s
3317    .Case("nzcvqg", 0xc) // same as CPSR_fs
3318    .Default(~0U);
3319
3320    if (FlagsVal == ~0U) {
3321      if (!Flags.empty())
3322        return MatchOperand_NoMatch;
3323      else
3324        FlagsVal = 8; // No flag
3325    }
3326  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3327    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3328      Flags = "fc";
3329    for (int i = 0, e = Flags.size(); i != e; ++i) {
3330      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3331      .Case("c", 1)
3332      .Case("x", 2)
3333      .Case("s", 4)
3334      .Case("f", 8)
3335      .Default(~0U);
3336
3337      // If some specific flag is already set, it means that some letter is
3338      // present more than once, which is not acceptable.
3339      if (FlagsVal == ~0U || (FlagsVal & Flag))
3340        return MatchOperand_NoMatch;
3341      FlagsVal |= Flag;
3342    }
3343  } else // No match for special register.
3344    return MatchOperand_NoMatch;
3345
3346  // Special register without flags is NOT equivalent to "fc" flags.
3347  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3348  // two lines would enable gas compatibility at the expense of breaking
3349  // round-tripping.
3350  //
3351  // if (!FlagsVal)
3352  //  FlagsVal = 0x9;
3353
3354  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3355  if (SpecReg == "spsr")
3356    FlagsVal |= 16;
3357
3358  Parser.Lex(); // Eat identifier token.
3359  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3360  return MatchOperand_Success;
3361}
3362
3363ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3364parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3365            int Low, int High) {
3366  const AsmToken &Tok = Parser.getTok();
3367  if (Tok.isNot(AsmToken::Identifier)) {
3368    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3369    return MatchOperand_ParseFail;
3370  }
3371  StringRef ShiftName = Tok.getString();
3372  std::string LowerOp = Op.lower();
3373  std::string UpperOp = Op.upper();
3374  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3375    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3376    return MatchOperand_ParseFail;
3377  }
3378  Parser.Lex(); // Eat shift type token.
3379
3380  // There must be a '#' and a shift amount.
3381  if (Parser.getTok().isNot(AsmToken::Hash) &&
3382      Parser.getTok().isNot(AsmToken::Dollar)) {
3383    Error(Parser.getTok().getLoc(), "'#' expected");
3384    return MatchOperand_ParseFail;
3385  }
3386  Parser.Lex(); // Eat hash token.
3387
3388  const MCExpr *ShiftAmount;
3389  SMLoc Loc = Parser.getTok().getLoc();
3390  if (getParser().ParseExpression(ShiftAmount)) {
3391    Error(Loc, "illegal expression");
3392    return MatchOperand_ParseFail;
3393  }
3394  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3395  if (!CE) {
3396    Error(Loc, "constant expression expected");
3397    return MatchOperand_ParseFail;
3398  }
3399  int Val = CE->getValue();
3400  if (Val < Low || Val > High) {
3401    Error(Loc, "immediate value out of range");
3402    return MatchOperand_ParseFail;
3403  }
3404
3405  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3406
3407  return MatchOperand_Success;
3408}
3409
3410ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3411parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3412  const AsmToken &Tok = Parser.getTok();
3413  SMLoc S = Tok.getLoc();
3414  if (Tok.isNot(AsmToken::Identifier)) {
3415    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3416    return MatchOperand_ParseFail;
3417  }
3418  int Val = StringSwitch<int>(Tok.getString())
3419    .Case("be", 1)
3420    .Case("le", 0)
3421    .Default(-1);
3422  Parser.Lex(); // Eat the token.
3423
3424  if (Val == -1) {
3425    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3426    return MatchOperand_ParseFail;
3427  }
3428  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3429                                                                  getContext()),
3430                                           S, Parser.getTok().getLoc()));
3431  return MatchOperand_Success;
3432}
3433
3434/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3435/// instructions. Legal values are:
3436///     lsl #n  'n' in [0,31]
3437///     asr #n  'n' in [1,32]
3438///             n == 32 encoded as n == 0.
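/// For illustration (assumed GNU-style syntax), "ssat r0, #8, r1, lsl #4" and
/// "ssat r0, #8, r1, asr #16" both use this shifter-immediate operand.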
3439ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3440parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3441  const AsmToken &Tok = Parser.getTok();
3442  SMLoc S = Tok.getLoc();
3443  if (Tok.isNot(AsmToken::Identifier)) {
3444    Error(S, "shift operator 'asr' or 'lsl' expected");
3445    return MatchOperand_ParseFail;
3446  }
3447  StringRef ShiftName = Tok.getString();
3448  bool isASR;
3449  if (ShiftName == "lsl" || ShiftName == "LSL")
3450    isASR = false;
3451  else if (ShiftName == "asr" || ShiftName == "ASR")
3452    isASR = true;
3453  else {
3454    Error(S, "shift operator 'asr' or 'lsl' expected");
3455    return MatchOperand_ParseFail;
3456  }
3457  Parser.Lex(); // Eat the operator.
3458
3459  // A '#' and a shift amount.
3460  if (Parser.getTok().isNot(AsmToken::Hash) &&
3461      Parser.getTok().isNot(AsmToken::Dollar)) {
3462    Error(Parser.getTok().getLoc(), "'#' expected");
3463    return MatchOperand_ParseFail;
3464  }
3465  Parser.Lex(); // Eat hash token.
3466
3467  const MCExpr *ShiftAmount;
3468  SMLoc E = Parser.getTok().getLoc();
3469  if (getParser().ParseExpression(ShiftAmount)) {
3470    Error(E, "malformed shift expression");
3471    return MatchOperand_ParseFail;
3472  }
3473  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3474  if (!CE) {
3475    Error(E, "shift amount must be an immediate");
3476    return MatchOperand_ParseFail;
3477  }
3478
3479  int64_t Val = CE->getValue();
3480  if (isASR) {
3481    // Shift amount must be in [1,32]
3482    if (Val < 1 || Val > 32) {
3483      Error(E, "'asr' shift amount must be in range [1,32]");
3484      return MatchOperand_ParseFail;
3485    }
3486    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3487    if (isThumb() && Val == 32) {
3488      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3489      return MatchOperand_ParseFail;
3490    }
3491    if (Val == 32) Val = 0;
3492  } else {
3493    // Shift amount must be in [0,31]
3494    if (Val < 0 || Val > 31) {
3495      Error(E, "'lsl' shift amount must be in range [0,31]");
3496      return MatchOperand_ParseFail;
3497    }
3498  }
3499
3500  E = Parser.getTok().getLoc();
3501  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3502
3503  return MatchOperand_Success;
3504}
3505
3506/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3507/// of instructions. Legal values are:
3508///     ror #n  'n' in {0, 8, 16, 24}
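/// For illustration (assumed GNU-style syntax), "sxtb r0, r1, ror #8" and
/// "uxth r2, r3, ror #16" use this operand; omitting it means a rotation of 0.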
3509ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3510parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3511  const AsmToken &Tok = Parser.getTok();
3512  SMLoc S = Tok.getLoc();
3513  if (Tok.isNot(AsmToken::Identifier))
3514    return MatchOperand_NoMatch;
3515  StringRef ShiftName = Tok.getString();
3516  if (ShiftName != "ror" && ShiftName != "ROR")
3517    return MatchOperand_NoMatch;
3518  Parser.Lex(); // Eat the operator.
3519
3520  // A '#' and a rotate amount.
3521  if (Parser.getTok().isNot(AsmToken::Hash) &&
3522      Parser.getTok().isNot(AsmToken::Dollar)) {
3523    Error(Parser.getTok().getLoc(), "'#' expected");
3524    return MatchOperand_ParseFail;
3525  }
3526  Parser.Lex(); // Eat hash token.
3527
3528  const MCExpr *ShiftAmount;
3529  SMLoc E = Parser.getTok().getLoc();
3530  if (getParser().ParseExpression(ShiftAmount)) {
3531    Error(E, "malformed rotate expression");
3532    return MatchOperand_ParseFail;
3533  }
3534  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3535  if (!CE) {
3536    Error(E, "rotate amount must be an immediate");
3537    return MatchOperand_ParseFail;
3538  }
3539
3540  int64_t Val = CE->getValue();
3541  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3542  // normally, zero is represented in asm by omitting the rotate operand
3543  // entirely.
3544  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3545    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3546    return MatchOperand_ParseFail;
3547  }
3548
3549  E = Parser.getTok().getLoc();
3550  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3551
3552  return MatchOperand_Success;
3553}
3554
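// Parse a "#<lsb>, #<width>" bitfield descriptor (used by e.g. BFI/BFC).
// For illustration (assumed GNU-style syntax), in "bfi r0, r1, #8, #4" the
// descriptor is "#8, #4", i.e. LSB 8 and width 4.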
3555ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3556parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3557  SMLoc S = Parser.getTok().getLoc();
3558  // The bitfield descriptor is really two operands, the LSB and the width.
3559  if (Parser.getTok().isNot(AsmToken::Hash) &&
3560      Parser.getTok().isNot(AsmToken::Dollar)) {
3561    Error(Parser.getTok().getLoc(), "'#' expected");
3562    return MatchOperand_ParseFail;
3563  }
3564  Parser.Lex(); // Eat hash token.
3565
3566  const MCExpr *LSBExpr;
3567  SMLoc E = Parser.getTok().getLoc();
3568  if (getParser().ParseExpression(LSBExpr)) {
3569    Error(E, "malformed immediate expression");
3570    return MatchOperand_ParseFail;
3571  }
3572  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3573  if (!CE) {
3574    Error(E, "'lsb' operand must be an immediate");
3575    return MatchOperand_ParseFail;
3576  }
3577
3578  int64_t LSB = CE->getValue();
3579  // The LSB must be in the range [0,31]
3580  if (LSB < 0 || LSB > 31) {
3581    Error(E, "'lsb' operand must be in the range [0,31]");
3582    return MatchOperand_ParseFail;
3583  }
3584  E = Parser.getTok().getLoc();
3585
3586  // Expect another immediate operand.
3587  if (Parser.getTok().isNot(AsmToken::Comma)) {
3588    Error(Parser.getTok().getLoc(), "too few operands");
3589    return MatchOperand_ParseFail;
3590  }
3591  Parser.Lex(); // Eat comma token.
3592  if (Parser.getTok().isNot(AsmToken::Hash) &&
3593      Parser.getTok().isNot(AsmToken::Dollar)) {
3594    Error(Parser.getTok().getLoc(), "'#' expected");
3595    return MatchOperand_ParseFail;
3596  }
3597  Parser.Lex(); // Eat hash token.
3598
3599  const MCExpr *WidthExpr;
3600  if (getParser().ParseExpression(WidthExpr)) {
3601    Error(E, "malformed immediate expression");
3602    return MatchOperand_ParseFail;
3603  }
3604  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3605  if (!CE) {
3606    Error(E, "'width' operand must be an immediate");
3607    return MatchOperand_ParseFail;
3608  }
3609
3610  int64_t Width = CE->getValue();
3611  // The width must be in the range [1,32-lsb]
3612  if (Width < 1 || Width > 32 - LSB) {
3613    Error(E, "'width' operand must be in the range [1,32-lsb]");
3614    return MatchOperand_ParseFail;
3615  }
3616  E = Parser.getTok().getLoc();
3617
3618  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3619
3620  return MatchOperand_Success;
3621}
3622
3623ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3624parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3625  // Check for a post-index addressing register operand. Specifically:
3626  // postidx_reg := '+' register {, shift}
3627  //              | '-' register {, shift}
3628  //              | register {, shift}
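  // For illustration (assumed GNU-style syntax), in "ldr r0, [r1], r2, lsl #2"
  // the post-index operand parsed here is "r2, lsl #2"; in "str r0, [r1], -r2"
  // it is "-r2".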
3629
3630  // This method must return MatchOperand_NoMatch without consuming any tokens
3631  // in the case where there is no match, as other alternatives are handled by
3632  // other parse methods.
3633  AsmToken Tok = Parser.getTok();
3634  SMLoc S = Tok.getLoc();
3635  bool haveEaten = false;
3636  bool isAdd = true;
3637  int Reg = -1;
3638  if (Tok.is(AsmToken::Plus)) {
3639    Parser.Lex(); // Eat the '+' token.
3640    haveEaten = true;
3641  } else if (Tok.is(AsmToken::Minus)) {
3642    Parser.Lex(); // Eat the '-' token.
3643    isAdd = false;
3644    haveEaten = true;
3645  }
3646  if (Parser.getTok().is(AsmToken::Identifier))
3647    Reg = tryParseRegister();
3648  if (Reg == -1) {
3649    if (!haveEaten)
3650      return MatchOperand_NoMatch;
3651    Error(Parser.getTok().getLoc(), "register expected");
3652    return MatchOperand_ParseFail;
3653  }
3654  SMLoc E = Parser.getTok().getLoc();
3655
3656  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3657  unsigned ShiftImm = 0;
3658  if (Parser.getTok().is(AsmToken::Comma)) {
3659    Parser.Lex(); // Eat the ','.
3660    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3661      return MatchOperand_ParseFail;
3662  }
3663
3664  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3665                                                  ShiftImm, S, E));
3666
3667  return MatchOperand_Success;
3668}
3669
3670ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3671parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3672  // Check for a post-index addressing register operand. Specifically:
3673  // am3offset := '+' register
3674  //              | '-' register
3675  //              | register
3676  //              | # imm
3677  //              | # + imm
3678  //              | # - imm
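  // For illustration (assumed GNU-style syntax), in "ldrd r0, r1, [r2], #8" the
  // am3offset is "#8"; in "ldrh r0, [r1], -r2" it is "-r2", and "#-0" is
  // encoded with the INT32_MIN flag value below.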
3679
3680  // This method must return MatchOperand_NoMatch without consuming any tokens
3681  // in the case where there is no match, as other alternatives are handled by
3682  // other parse methods.
3683  AsmToken Tok = Parser.getTok();
3684  SMLoc S = Tok.getLoc();
3685
3686  // Do immediates first, as we always parse those if we have a '#'.
3687  if (Parser.getTok().is(AsmToken::Hash) ||
3688      Parser.getTok().is(AsmToken::Dollar)) {
3689    Parser.Lex(); // Eat the '#'.
3690    // Explicitly look for a '-', as we need to encode negative zero
3691    // differently.
3692    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3693    const MCExpr *Offset;
3694    if (getParser().ParseExpression(Offset))
3695      return MatchOperand_ParseFail;
3696    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3697    if (!CE) {
3698      Error(S, "constant expression expected");
3699      return MatchOperand_ParseFail;
3700    }
3701    SMLoc E = Tok.getLoc();
3702    // Negative zero is encoded as the flag value INT32_MIN.
3703    int32_t Val = CE->getValue();
3704    if (isNegative && Val == 0)
3705      Val = INT32_MIN;
3706
3707    Operands.push_back(
3708      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3709
3710    return MatchOperand_Success;
3711  }
3712
3713
3714  bool haveEaten = false;
3715  bool isAdd = true;
3716  int Reg = -1;
3717  if (Tok.is(AsmToken::Plus)) {
3718    Parser.Lex(); // Eat the '+' token.
3719    haveEaten = true;
3720  } else if (Tok.is(AsmToken::Minus)) {
3721    Parser.Lex(); // Eat the '-' token.
3722    isAdd = false;
3723    haveEaten = true;
3724  }
3725  if (Parser.getTok().is(AsmToken::Identifier))
3726    Reg = tryParseRegister();
3727  if (Reg == -1) {
3728    if (!haveEaten)
3729      return MatchOperand_NoMatch;
3730    Error(Parser.getTok().getLoc(), "register expected");
3731    return MatchOperand_ParseFail;
3732  }
3733  SMLoc E = Parser.getTok().getLoc();
3734
3735  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3736                                                  0, S, E));
3737
3738  return MatchOperand_Success;
3739}
3740
3741/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3742/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3743/// when they refer to multiple MIOperands inside a single one.
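/// As a rough illustration (assumed operand layout), for "ldrd r0, r1, [r2, #8]"
/// Operands[2] is r0, Operands[3] is r1, Operands[4] is the memory operand,
/// and Operands[1] is the predicate.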
3744bool ARMAsmParser::
3745cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3746             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3747  // Rt, Rt2
3748  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3749  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3750  // Create a writeback register dummy placeholder.
3751  Inst.addOperand(MCOperand::CreateReg(0));
3752  // addr
3753  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3754  // pred
3755  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3756  return true;
3757}
3758
3759/// cvtT2StrdPre - Convert parsed operands to MCInst.
3760/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3761/// when they refer to multiple MIOperands inside a single one.
3762bool ARMAsmParser::
3763cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3764             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3765  // Create a writeback register dummy placeholder.
3766  Inst.addOperand(MCOperand::CreateReg(0));
3767  // Rt, Rt2
3768  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3769  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3770  // addr
3771  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3772  // pred
3773  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3774  return true;
3775}
3776
3777/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3778/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3779/// when they refer to multiple MIOperands inside a single one.
3780bool ARMAsmParser::
3781cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3782                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3783  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3784
3785  // Create a writeback register dummy placeholder.
3786  Inst.addOperand(MCOperand::CreateImm(0));
3787
3788  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3789  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3790  return true;
3791}
3792
3793/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3794/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3795/// when they refer to multiple MIOperands inside a single one.
3796bool ARMAsmParser::
3797cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3798                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3799  // Create a writeback register dummy placeholder.
3800  Inst.addOperand(MCOperand::CreateImm(0));
3801  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3802  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3803  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3804  return true;
3805}
3806
3807/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3808/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3809/// when they refer to multiple MIOperands inside a single one.
3810bool ARMAsmParser::
3811cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3812                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3813  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3814
3815  // Create a writeback register dummy placeholder.
3816  Inst.addOperand(MCOperand::CreateImm(0));
3817
3818  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3819  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3820  return true;
3821}
3822
3823/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3824/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3825/// when they refer to multiple MIOperands inside a single one.
3826bool ARMAsmParser::
3827cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3828                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3829  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3830
3831  // Create a writeback register dummy placeholder.
3832  Inst.addOperand(MCOperand::CreateImm(0));
3833
3834  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3835  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3836  return true;
3837}
3838
3839
3840/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3841/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3842/// when they refer to multiple MIOperands inside a single one.
3843bool ARMAsmParser::
3844cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3845                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3846  // Create a writeback register dummy placeholder.
3847  Inst.addOperand(MCOperand::CreateImm(0));
3848  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3849  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3850  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3851  return true;
3852}
3853
3854/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3855/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3856/// when they refer to multiple MIOperands inside a single one.
3857bool ARMAsmParser::
3858cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3859                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3860  // Create a writeback register dummy placeholder.
3861  Inst.addOperand(MCOperand::CreateImm(0));
3862  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3863  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3864  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3865  return true;
3866}
3867
3868/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3869/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3870/// when they refer to multiple MIOperands inside a single one.
3871bool ARMAsmParser::
3872cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3873                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3874  // Create a writeback register dummy placeholder.
3875  Inst.addOperand(MCOperand::CreateImm(0));
3876  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3877  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3878  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3879  return true;
3880}
3881
3882/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3883/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3884/// when they refer to multiple MIOperands inside a single one.
3885bool ARMAsmParser::
3886cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3887                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3888  // Rt
3889  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3890  // Create a writeback register dummy placeholder.
3891  Inst.addOperand(MCOperand::CreateImm(0));
3892  // addr
3893  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3894  // offset
3895  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3896  // pred
3897  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3898  return true;
3899}
3900
3901/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3902/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3903/// when they refer to multiple MIOperands inside a single one.
3904bool ARMAsmParser::
3905cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3906                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3907  // Rt
3908  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3909  // Create a writeback register dummy placeholder.
3910  Inst.addOperand(MCOperand::CreateImm(0));
3911  // addr
3912  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3913  // offset
3914  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3915  // pred
3916  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3917  return true;
3918}
3919
3920/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3921/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3922/// when they refer to multiple MIOperands inside a single one.
3923bool ARMAsmParser::
3924cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3925                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3926  // Create a writeback register dummy placeholder.
3927  Inst.addOperand(MCOperand::CreateImm(0));
3928  // Rt
3929  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3930  // addr
3931  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3932  // offset
3933  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3934  // pred
3935  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3936  return true;
3937}
3938
3939/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3940/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3941/// when they refer to multiple MIOperands inside a single one.
3942bool ARMAsmParser::
3943cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3944                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3945  // Create a writeback register dummy placeholder.
3946  Inst.addOperand(MCOperand::CreateImm(0));
3947  // Rt
3948  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3949  // addr
3950  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3951  // offset
3952  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3953  // pred
3954  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3955  return true;
3956}
3957
3958/// cvtLdrdPre - Convert parsed operands to MCInst.
3959/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3960/// when they refer to multiple MIOperands inside a single one.
3961bool ARMAsmParser::
3962cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3963           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3964  // Rt, Rt2
3965  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3966  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3967  // Create a writeback register dummy placeholder.
3968  Inst.addOperand(MCOperand::CreateImm(0));
3969  // addr
3970  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3971  // pred
3972  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3973  return true;
3974}
3975
3976/// cvtStrdPre - Convert parsed operands to MCInst.
3977/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3978/// when they refer to multiple MIOperands inside a single one.
3979bool ARMAsmParser::
3980cvtStrdPre(MCInst &Inst, unsigned Opcode,
3981           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3982  // Create a writeback register dummy placeholder.
3983  Inst.addOperand(MCOperand::CreateImm(0));
3984  // Rt, Rt2
3985  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3986  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3987  // addr
3988  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3989  // pred
3990  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3991  return true;
3992}
3993
3994/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3995/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3996/// when they refer to multiple MIOperands inside a single one.
3997bool ARMAsmParser::
3998cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3999                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4000  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4001  // Create a writeback register dummy placeholder.
4002  Inst.addOperand(MCOperand::CreateImm(0));
4003  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4004  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4005  return true;
4006}
4007
4008/// cvtThumbMultiply - Convert parsed operands to MCInst.
4009/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4010/// when they refer to multiple MIOperands inside a single one.
4011bool ARMAsmParser::
4012cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4013           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4014  // The second source operand must be the same register as the destination
4015  // operand.
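  // For illustration (assumed GNU-style syntax), "muls r0, r1, r0" and
  // "muls r0, r0, r1" are accepted, while "muls r0, r1, r2" is rejected here.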
4016  if (Operands.size() == 6 &&
4017      (((ARMOperand*)Operands[3])->getReg() !=
4018       ((ARMOperand*)Operands[5])->getReg()) &&
4019      (((ARMOperand*)Operands[3])->getReg() !=
4020       ((ARMOperand*)Operands[4])->getReg())) {
4021    Error(Operands[3]->getStartLoc(),
4022          "destination register must match source register");
4023    return false;
4024  }
4025  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4026  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4027  // If we have a three-operand form, make sure to set Rn to be the operand
4028  // that isn't the same as Rd.
4029  unsigned RegOp = 4;
4030  if (Operands.size() == 6 &&
4031      ((ARMOperand*)Operands[4])->getReg() ==
4032        ((ARMOperand*)Operands[3])->getReg())
4033    RegOp = 5;
4034  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4035  Inst.addOperand(Inst.getOperand(0));
4036  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4037
4038  return true;
4039}
4040
4041bool ARMAsmParser::
4042cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4043              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4044  // Vd
4045  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4046  // Create a writeback register dummy placeholder.
4047  Inst.addOperand(MCOperand::CreateImm(0));
4048  // Vn
4049  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4050  // pred
4051  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4052  return true;
4053}
4054
4055bool ARMAsmParser::
4056cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4057                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4058  // Vd
4059  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4060  // Create a writeback register dummy placeholder.
4061  Inst.addOperand(MCOperand::CreateImm(0));
4062  // Vn
4063  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4064  // Vm
4065  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4066  // pred
4067  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4068  return true;
4069}
4070
4071bool ARMAsmParser::
4072cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4073              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4074  // Create a writeback register dummy placeholder.
4075  Inst.addOperand(MCOperand::CreateImm(0));
4076  // Vn
4077  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4078  // Vt
4079  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4080  // pred
4081  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4082  return true;
4083}
4084
4085bool ARMAsmParser::
4086cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4087                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4088  // Create a writeback register dummy placeholder.
4089  Inst.addOperand(MCOperand::CreateImm(0));
4090  // Vn
4091  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4092  // Vm
4093  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4094  // Vt
4095  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4096  // pred
4097  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4098  return true;
4099}
4100
4101/// Parse an ARM memory expression. Return false on success; otherwise return
4102/// true (an error is emitted). The first token must be a '[' when called.
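/// For illustration (assumed GNU-style syntax), forms handled here include
/// "[r0]", "[r0, #-4]", "[r0, :128]" (alignment), and "[r0, r1, lsl #2]",
/// optionally followed by a '!' writeback marker.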
4103bool ARMAsmParser::
4104parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4105  SMLoc S, E;
4106  assert(Parser.getTok().is(AsmToken::LBrac) &&
4107         "Token is not a Left Bracket");
4108  S = Parser.getTok().getLoc();
4109  Parser.Lex(); // Eat left bracket token.
4110
4111  const AsmToken &BaseRegTok = Parser.getTok();
4112  int BaseRegNum = tryParseRegister();
4113  if (BaseRegNum == -1)
4114    return Error(BaseRegTok.getLoc(), "register expected");
4115
4116  // The next token must either be a comma or a closing bracket.
4117  const AsmToken &Tok = Parser.getTok();
4118  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4119    return Error(Tok.getLoc(), "malformed memory operand");
4120
4121  if (Tok.is(AsmToken::RBrac)) {
4122    E = Tok.getLoc();
4123    Parser.Lex(); // Eat right bracket token.
4124
4125    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4126                                             0, 0, false, S, E));
4127
4128    // If there's a pre-indexing writeback marker, '!', just add it as a token
4129    // operand. It's rather odd, but syntactically valid.
4130    if (Parser.getTok().is(AsmToken::Exclaim)) {
4131      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4132      Parser.Lex(); // Eat the '!'.
4133    }
4134
4135    return false;
4136  }
4137
4138  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4139  Parser.Lex(); // Eat the comma.
4140
4141  // If we have a ':', it's an alignment specifier.
4142  if (Parser.getTok().is(AsmToken::Colon)) {
4143    Parser.Lex(); // Eat the ':'.
4144    E = Parser.getTok().getLoc();
4145
4146    const MCExpr *Expr;
4147    if (getParser().ParseExpression(Expr))
4148     return true;
4149
4150    // The expression has to be a constant. Memory references with relocations
4151    // don't come through here, as they use the <label> forms of the relevant
4152    // instructions.
4153    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4154    if (!CE)
4155      return Error (E, "constant expression expected");
4156
4157    unsigned Align = 0;
4158    switch (CE->getValue()) {
4159    default:
4160      return Error(E,
4161                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4162    case 16:  Align = 2; break;
4163    case 32:  Align = 4; break;
4164    case 64:  Align = 8; break;
4165    case 128: Align = 16; break;
4166    case 256: Align = 32; break;
4167    }
4168
4169    // Now we should have the closing ']'
4170    E = Parser.getTok().getLoc();
4171    if (Parser.getTok().isNot(AsmToken::RBrac))
4172      return Error(E, "']' expected");
4173    Parser.Lex(); // Eat right bracket token.
4174
4175    // Don't worry about range checking the value here. That's handled by
4176    // the is*() predicates.
4177    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4178                                             ARM_AM::no_shift, 0, Align,
4179                                             false, S, E));
4180
4181    // If there's a pre-indexing writeback marker, '!', just add it as a token
4182    // operand.
4183    if (Parser.getTok().is(AsmToken::Exclaim)) {
4184      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4185      Parser.Lex(); // Eat the '!'.
4186    }
4187
4188    return false;
4189  }
4190
4191  // If we have a '#', it's an immediate offset, else assume it's a register
4192  // offset. Be friendly and also accept a plain integer (without a leading
4193  // hash) for gas compatibility.
4194  if (Parser.getTok().is(AsmToken::Hash) ||
4195      Parser.getTok().is(AsmToken::Dollar) ||
4196      Parser.getTok().is(AsmToken::Integer)) {
4197    if (Parser.getTok().isNot(AsmToken::Integer))
4198      Parser.Lex(); // Eat the '#'.
4199    E = Parser.getTok().getLoc();
4200
4201    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4202    const MCExpr *Offset;
4203    if (getParser().ParseExpression(Offset))
4204     return true;
4205
4206    // The expression has to be a constant. Memory references with relocations
4207    // don't come through here, as they use the <label> forms of the relevant
4208    // instructions.
4209    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4210    if (!CE)
4211      return Error (E, "constant expression expected");
4212
4213    // If the constant was #-0, represent it as INT32_MIN.
4214    int32_t Val = CE->getValue();
4215    if (isNegative && Val == 0)
4216      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4217
4218    // Now we should have the closing ']'
4219    E = Parser.getTok().getLoc();
4220    if (Parser.getTok().isNot(AsmToken::RBrac))
4221      return Error(E, "']' expected");
4222    Parser.Lex(); // Eat right bracket token.
4223
4224    // Don't worry about range checking the value here. That's handled by
4225    // the is*() predicates.
4226    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4227                                             ARM_AM::no_shift, 0, 0,
4228                                             false, S, E));
4229
4230    // If there's a pre-indexing writeback marker, '!', just add it as a token
4231    // operand.
4232    if (Parser.getTok().is(AsmToken::Exclaim)) {
4233      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4234      Parser.Lex(); // Eat the '!'.
4235    }
4236
4237    return false;
4238  }
4239
4240  // The register offset is optionally preceded by a '+' or '-'
4241  bool isNegative = false;
4242  if (Parser.getTok().is(AsmToken::Minus)) {
4243    isNegative = true;
4244    Parser.Lex(); // Eat the '-'.
4245  } else if (Parser.getTok().is(AsmToken::Plus)) {
4246    // Nothing to do.
4247    Parser.Lex(); // Eat the '+'.
4248  }
4249
4250  E = Parser.getTok().getLoc();
4251  int OffsetRegNum = tryParseRegister();
4252  if (OffsetRegNum == -1)
4253    return Error(E, "register expected");
4254
4255  // If there's a shift operator, handle it.
4256  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4257  unsigned ShiftImm = 0;
4258  if (Parser.getTok().is(AsmToken::Comma)) {
4259    Parser.Lex(); // Eat the ','.
4260    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4261      return true;
4262  }
4263
4264  // Now we should have the closing ']'
4265  E = Parser.getTok().getLoc();
4266  if (Parser.getTok().isNot(AsmToken::RBrac))
4267    return Error(E, "']' expected");
4268  Parser.Lex(); // Eat right bracket token.
4269
4270  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4271                                           ShiftType, ShiftImm, 0, isNegative,
4272                                           S, E));
4273
4274  // If there's a pre-indexing writeback marker, '!', just add it as a token
4275  // operand.
4276  if (Parser.getTok().is(AsmToken::Exclaim)) {
4277    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4278    Parser.Lex(); // Eat the '!'.
4279  }
4280
4281  return false;
4282}
4283
4284/// parseMemRegOffsetShift - one of these two:
4285///   ( lsl | lsr | asr | ror ) , # shift_amount
4286///   rrx
4287/// Returns false if it successfully parses a shift, true otherwise.
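/// For illustration, with the source text "r2, lsl #2" the caller has already
/// consumed "r2" and the comma; this routine then parses "lsl #2" into St and
/// Amount. "rrx" stands alone with an implicit amount of 0.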
4288bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4289                                          unsigned &Amount) {
4290  SMLoc Loc = Parser.getTok().getLoc();
4291  const AsmToken &Tok = Parser.getTok();
4292  if (Tok.isNot(AsmToken::Identifier))
4293    return true;
4294  StringRef ShiftName = Tok.getString();
4295  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4296      ShiftName == "asl" || ShiftName == "ASL")
4297    St = ARM_AM::lsl;
4298  else if (ShiftName == "lsr" || ShiftName == "LSR")
4299    St = ARM_AM::lsr;
4300  else if (ShiftName == "asr" || ShiftName == "ASR")
4301    St = ARM_AM::asr;
4302  else if (ShiftName == "ror" || ShiftName == "ROR")
4303    St = ARM_AM::ror;
4304  else if (ShiftName == "rrx" || ShiftName == "RRX")
4305    St = ARM_AM::rrx;
4306  else
4307    return Error(Loc, "illegal shift operator");
4308  Parser.Lex(); // Eat shift type token.
4309
4310  // rrx stands alone.
4311  Amount = 0;
4312  if (St != ARM_AM::rrx) {
4313    Loc = Parser.getTok().getLoc();
4314    // A '#' and a shift amount.
4315    const AsmToken &HashTok = Parser.getTok();
4316    if (HashTok.isNot(AsmToken::Hash) &&
4317        HashTok.isNot(AsmToken::Dollar))
4318      return Error(HashTok.getLoc(), "'#' expected");
4319    Parser.Lex(); // Eat hash token.
4320
4321    const MCExpr *Expr;
4322    if (getParser().ParseExpression(Expr))
4323      return true;
4324    // Range check the immediate.
4325    // lsl, ror: 0 <= imm <= 31
4326    // lsr, asr: 0 <= imm <= 32
4327    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4328    if (!CE)
4329      return Error(Loc, "shift amount must be an immediate");
4330    int64_t Imm = CE->getValue();
4331    if (Imm < 0 ||
4332        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4333        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4334      return Error(Loc, "immediate shift value out of range");
4335    Amount = Imm;
4336  }
4337
4338  return false;
4339}
4340
4341/// parseFPImm - A floating point immediate expression operand.
4342ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4343parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4344  // Anything that can accept a floating point constant as an operand
4345  // needs to go through here, as the regular ParseExpression is
4346  // integer only.
4347  //
4348  // This routine still creates a generic Immediate operand, containing
4349  // a bitcast of the 64-bit floating point value. The various operands
4350  // that accept floats can check whether the value is valid for them
4351  // via the standard is*() predicates.
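  //
  // For example, "vmov.f32 s0, #0.5" produces an immediate holding the IEEE
  // single-precision bit pattern of 0.5; a leading '-' simply toggles bit 31
  // of that pattern below.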
4352
4353  SMLoc S = Parser.getTok().getLoc();
4354
4355  if (Parser.getTok().isNot(AsmToken::Hash) &&
4356      Parser.getTok().isNot(AsmToken::Dollar))
4357    return MatchOperand_NoMatch;
4358
4359  // Disambiguate the VMOV forms that can accept an FP immediate.
4360  // vmov.f32 <sreg>, #imm
4361  // vmov.f64 <dreg>, #imm
4362  // vmov.f32 <dreg>, #imm  @ vector f32x2
4363  // vmov.f32 <qreg>, #imm  @ vector f32x4
4364  //
4365  // There are also the NEON VMOV instructions which expect an
4366  // integer constant. Make sure we don't try to parse an FPImm
4367  // for these:
4368  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
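  // e.g. "vmov.i32 d0, #5" returns MatchOperand_NoMatch here so the generic
  // integer expression parsing handles the immediate instead.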
4369  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4370  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4371                           TyOp->getToken() != ".f64"))
4372    return MatchOperand_NoMatch;
4373
4374  Parser.Lex(); // Eat the '#'.
4375
4376  // Handle negation, as that still comes through as a separate token.
4377  bool isNegative = false;
4378  if (Parser.getTok().is(AsmToken::Minus)) {
4379    isNegative = true;
4380    Parser.Lex();
4381  }
4382  const AsmToken &Tok = Parser.getTok();
4383  SMLoc Loc = Tok.getLoc();
4384  if (Tok.is(AsmToken::Real)) {
4385    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4386    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4387    // If we had a '-' in front, toggle the sign bit.
4388    IntVal ^= (uint64_t)isNegative << 31;
4389    Parser.Lex(); // Eat the token.
4390    Operands.push_back(ARMOperand::CreateImm(
4391          MCConstantExpr::Create(IntVal, getContext()),
4392          S, Parser.getTok().getLoc()));
4393    return MatchOperand_Success;
4394  }
4395  // Also handle plain integers. Instructions which allow floating point
4396  // immediates also allow a raw encoded 8-bit value.
4397  if (Tok.is(AsmToken::Integer)) {
4398    int64_t Val = Tok.getIntVal();
4399    Parser.Lex(); // Eat the token.
4400    if (Val > 255 || Val < 0) {
4401      Error(Loc, "encoded floating point value out of range");
4402      return MatchOperand_ParseFail;
4403    }
4404    double RealVal = ARM_AM::getFPImmFloat(Val);
4405    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4406    Operands.push_back(ARMOperand::CreateImm(
4407        MCConstantExpr::Create(Val, getContext()), S,
4408        Parser.getTok().getLoc()));
4409    return MatchOperand_Success;
4410  }
4411
4412  Error(Loc, "invalid floating point immediate");
4413  return MatchOperand_ParseFail;
4414}
4415
4416/// Parse an ARM instruction operand.  For now this parses the operand regardless
4417/// of the mnemonic.
4418bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4419                                StringRef Mnemonic) {
4420  SMLoc S, E;
4421
4422  // Check if the current operand has a custom associated parser; if so, try to
4423  // custom parse the operand, or fall back to the general approach.
4424  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4425  if (ResTy == MatchOperand_Success)
4426    return false;
4427  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4428  // there was a match, but an error occurred, in which case, just return that
4429  // the operand parsing failed.
4430  if (ResTy == MatchOperand_ParseFail)
4431    return true;
4432
4433  switch (getLexer().getKind()) {
4434  default:
4435    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4436    return true;
4437  case AsmToken::Identifier: {
4438    if (!tryParseRegisterWithWriteBack(Operands))
4439      return false;
4440    int Res = tryParseShiftRegister(Operands);
4441    if (Res == 0) // success
4442      return false;
4443    else if (Res == -1) // irrecoverable error
4444      return true;
4445    // If this is VMRS, check for the apsr_nzcv operand.
4446    if (Mnemonic == "vmrs" &&
4447        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4448      S = Parser.getTok().getLoc();
4449      Parser.Lex();
4450      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4451      return false;
4452    }
4453
4454    // Fall through for the Identifier case that is not a register or a
4455    // special name.
4456  }
4457  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4458  case AsmToken::Integer: // things like 1f and 2b as branch targets
4459  case AsmToken::String:  // quoted label names.
4460  case AsmToken::Dot: {   // . as a branch target
4461    // This was not a register so parse other operands that start with an
4462    // identifier (like labels) as expressions and create them as immediates.
4463    const MCExpr *IdVal;
4464    S = Parser.getTok().getLoc();
4465    if (getParser().ParseExpression(IdVal))
4466      return true;
4467    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4468    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4469    return false;
4470  }
4471  case AsmToken::LBrac:
4472    return parseMemory(Operands);
4473  case AsmToken::LCurly:
4474    return parseRegisterList(Operands);
4475  case AsmToken::Dollar:
4476  case AsmToken::Hash: {
4477    // #42 -> immediate.
4478    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4479    S = Parser.getTok().getLoc();
4480    Parser.Lex();
4481    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4482    const MCExpr *ImmVal;
4483    if (getParser().ParseExpression(ImmVal))
4484      return true;
4485    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4486    if (CE) {
4487      int32_t Val = CE->getValue();
4488      if (isNegative && Val == 0)
4489        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4490    }
4491    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4492    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4493    return false;
4494  }
4495  case AsmToken::Colon: {
4496    // ":lower16:" and ":upper16:" expression prefixes
4497    // FIXME: Check it's an expression prefix,
4498    // e.g. (FOO - :lower16:BAR) isn't legal.
4499    ARMMCExpr::VariantKind RefKind;
4500    if (parsePrefix(RefKind))
4501      return true;
4502
4503    const MCExpr *SubExprVal;
4504    if (getParser().ParseExpression(SubExprVal))
4505      return true;
4506
4507    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4508                                                   getContext());
4509    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4510    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4511    return false;
4512  }
4513  }
4514}
4515
4516// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4517//  :lower16: and :upper16:.
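//  e.g. for "movw r0, :lower16:sym" this consumes ":lower16:" and sets RefKind
//  to VK_ARM_LO16; the caller then parses "sym" as the sub-expression.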
4518bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4519  RefKind = ARMMCExpr::VK_ARM_None;
4520
4521  // :lower16: and :upper16: modifiers
4522  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4523  Parser.Lex(); // Eat ':'
4524
4525  if (getLexer().isNot(AsmToken::Identifier)) {
4526    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4527    return true;
4528  }
4529
4530  StringRef IDVal = Parser.getTok().getIdentifier();
4531  if (IDVal == "lower16") {
4532    RefKind = ARMMCExpr::VK_ARM_LO16;
4533  } else if (IDVal == "upper16") {
4534    RefKind = ARMMCExpr::VK_ARM_HI16;
4535  } else {
4536    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4537    return true;
4538  }
4539  Parser.Lex();
4540
4541  if (getLexer().isNot(AsmToken::Colon)) {
4542    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4543    return true;
4544  }
4545  Parser.Lex(); // Eat the last ':'
4546  return false;
4547}
4548
4549/// \brief Given a mnemonic, split out possible predication code and carry
4550/// setting letters to form a canonical mnemonic and flags.
4551//
4552// FIXME: Would be nice to autogen this.
4553// FIXME: This is a bit of a maze of special cases.
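// e.g. "addseq" splits into Mnemonic "add", PredicationCode ARMCC::EQ and
// CarrySetting true; "cpsie" splits into Mnemonic "cps" with ProcessorIMod
// set to ARM_PROC::IE.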
4554StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4555                                      unsigned &PredicationCode,
4556                                      bool &CarrySetting,
4557                                      unsigned &ProcessorIMod,
4558                                      StringRef &ITMask) {
4559  PredicationCode = ARMCC::AL;
4560  CarrySetting = false;
4561  ProcessorIMod = 0;
4562
4563  // Ignore some mnemonics we know aren't predicated forms.
4564  //
4565  // FIXME: Would be nice to autogen this.
4566  if ((Mnemonic == "movs" && isThumb()) ||
4567      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4568      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4569      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4570      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4571      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4572      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4573      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4574      Mnemonic == "fmuls")
4575    return Mnemonic;
4576
4577  // First, split out any predication code. Ignore mnemonics we know aren't
4578  // predicated but do have a carry-set and so weren't caught above.
4579  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4580      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4581      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4582      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4583    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4584      .Case("eq", ARMCC::EQ)
4585      .Case("ne", ARMCC::NE)
4586      .Case("hs", ARMCC::HS)
4587      .Case("cs", ARMCC::HS)
4588      .Case("lo", ARMCC::LO)
4589      .Case("cc", ARMCC::LO)
4590      .Case("mi", ARMCC::MI)
4591      .Case("pl", ARMCC::PL)
4592      .Case("vs", ARMCC::VS)
4593      .Case("vc", ARMCC::VC)
4594      .Case("hi", ARMCC::HI)
4595      .Case("ls", ARMCC::LS)
4596      .Case("ge", ARMCC::GE)
4597      .Case("lt", ARMCC::LT)
4598      .Case("gt", ARMCC::GT)
4599      .Case("le", ARMCC::LE)
4600      .Case("al", ARMCC::AL)
4601      .Default(~0U);
4602    if (CC != ~0U) {
4603      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4604      PredicationCode = CC;
4605    }
4606  }
4607
4608  // Next, determine if we have a carry setting bit. We explicitly ignore all
4609  // the instructions we know end in 's'.
4610  if (Mnemonic.endswith("s") &&
4611      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4612        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4613        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4614        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4615        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4616        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4617        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4618        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4619        (Mnemonic == "movs" && isThumb()))) {
4620    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4621    CarrySetting = true;
4622  }
4623
4624  // The "cps" instruction can have a interrupt mode operand which is glued into
4625  // the mnemonic. Check if this is the case, split it and parse the imod op
4626  if (Mnemonic.startswith("cps")) {
4627    // Split out any imod code.
4628    unsigned IMod =
4629      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4630      .Case("ie", ARM_PROC::IE)
4631      .Case("id", ARM_PROC::ID)
4632      .Default(~0U);
4633    if (IMod != ~0U) {
4634      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4635      ProcessorIMod = IMod;
4636    }
4637  }
4638
4639  // The "it" instruction has the condition mask on the end of the mnemonic.
4640  if (Mnemonic.startswith("it")) {
4641    ITMask = Mnemonic.slice(2, Mnemonic.size());
4642    Mnemonic = Mnemonic.slice(0, 2);
4643  }
4644
4645  return Mnemonic;
4646}
4647
4648/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4649/// inclusion of carry set or predication code operands.
4650//
4651// FIXME: It would be nice to autogen this.
4652void ARMAsmParser::
4653getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4654                      bool &CanAcceptPredicationCode) {
4655  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4656      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4657      Mnemonic == "add" || Mnemonic == "adc" ||
4658      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4659      Mnemonic == "orr" || Mnemonic == "mvn" ||
4660      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4661      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4662      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4663                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4664                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4665    CanAcceptCarrySet = true;
4666  } else
4667    CanAcceptCarrySet = false;
4668
4669  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4670      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4671      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4672      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4673      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4674      (Mnemonic == "clrex" && !isThumb()) ||
4675      (Mnemonic == "nop" && isThumbOne()) ||
4676      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4677        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4678        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4679      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4680       !isThumb()) ||
4681      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4682    CanAcceptPredicationCode = false;
4683  } else
4684    CanAcceptPredicationCode = true;
4685
4686  if (isThumb()) {
4687    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4688        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4689      CanAcceptPredicationCode = false;
4690  }
4691}
4692
4693bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4694                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4695  // FIXME: This is all horribly hacky. We really need a better way to deal
4696  // with optional operands like this in the matcher table.
4697
4698  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4699  // another does not. Specifically, the MOVW instruction does not. So we
4700  // special case it here and remove the defaulted (non-setting) cc_out
4701  // operand if that's the instruction we're trying to match.
4702  //
4703  // We do this as post-processing of the explicit operands rather than just
4704  // conditionally adding the cc_out in the first place because we need
4705  // to check the type of the parsed immediate operand.
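  //
  // e.g. for "mov r0, #0x1234" the immediate is not a modified-immediate
  // (SOImm) but does fit imm0_65535, so only MOVW can match and the default
  // cc_out operand must be dropped.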
4706  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4707      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4708      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4709      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4710    return true;
4711
4712  // Register-register 'add' for thumb does not have a cc_out operand
4713  // when there are only two register operands.
4714  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4715      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4716      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4717      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4718    return true;
4719  // Register-register 'add' for thumb does not have a cc_out operand
4720  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4721  // have to check the immediate range here since Thumb2 has a variant
4722  // that can handle a different range and has a cc_out operand.
4723  if (((isThumb() && Mnemonic == "add") ||
4724       (isThumbTwo() && Mnemonic == "sub")) &&
4725      Operands.size() == 6 &&
4726      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4727      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4728      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4729      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4730      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4731       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4732    return true;
4733  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4734  // imm0_4095 variant. That's the least-preferred variant when
4735  // selecting via the generic "add" mnemonic, so to know that we
4736  // should remove the cc_out operand, we have to explicitly check that
4737  // it's not one of the other variants. Ugh.
4738  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4739      Operands.size() == 6 &&
4740      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4741      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4742      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4743    // Nest conditions rather than one big 'if' statement for readability.
4744    //
4745    // If either register is a high reg, it's either one of the SP
4746    // variants (handled above) or a 32-bit encoding, so we just
4747    // check against T3. If the second register is the PC, this is an
4748    // alternate form of ADR, which uses encoding T4, so check for that too.
4749    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4750         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4751        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4752        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4753      return false;
4754    // If both registers are low, we're in an IT block, and the immediate is
4755    // in range, we should use encoding T1 instead, which has a cc_out.
4756    if (inITBlock() &&
4757        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4758        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4759        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4760      return false;
4761
4762    // Otherwise, we use encoding T4, which does not have a cc_out
4763    // operand.
4764    return true;
4765  }
4766
4767  // The thumb2 multiply instruction doesn't have a CCOut register, so
4768  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4769  // use the 16-bit encoding or not.
4770  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4771      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4772      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4773      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4774      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4775      // If the registers aren't low regs, the destination reg isn't the
4776      // same as one of the source regs, or the cc_out operand is zero
4777      // outside of an IT block, we have to use the 32-bit encoding, so
4778      // remove the cc_out operand.
4779      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4780       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4781       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4782       !inITBlock() ||
4783       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4784        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4785        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4786        static_cast<ARMOperand*>(Operands[4])->getReg())))
4787    return true;
4788
4789  // Also check the 'mul' syntax variant that doesn't specify an explicit
4790  // destination register.
4791  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4792      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4793      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4794      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4795      // If the registers aren't low regs or the cc_out operand is zero
4796      // outside of an IT block, we have to use the 32-bit encoding, so
4797      // remove the cc_out operand.
4798      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4799       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4800       !inITBlock()))
4801    return true;
4802
4803
4804
4805  // Register-register 'add/sub' for thumb does not have a cc_out operand
4806  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4807  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4808  // right, this will result in better diagnostics (which operand is off)
4809  // anyway.
4810  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4811      (Operands.size() == 5 || Operands.size() == 6) &&
4812      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4813      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4814      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4815    return true;
4816
4817  return false;
4818}
4819
4820static bool isDataTypeToken(StringRef Tok) {
4821  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4822    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4823    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4824    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4825    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4826    Tok == ".f" || Tok == ".d";
4827}
4828
4829// FIXME: This bit should probably be handled via an explicit match class
4830// in the .td files that matches the suffix instead of having it be
4831// a literal string token the way it is now.
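// e.g. for "vldmia.64 r0!, {d0-d3}" the ".64" suffix is simply dropped rather
// than being pushed as an extra token operand.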
4832static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4833  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4834}
4835
4836static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4837/// Parse an ARM instruction mnemonic followed by its operands.
4838bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4839                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4840  // Apply mnemonic aliases before doing anything else, as the destination
4841  // mnemonic may include suffixes, and we want to handle them normally.
4842  // The generic tblgen'erated code does this later, at the start of
4843  // MatchInstructionImpl(), but that's too late for aliases that include
4844  // any sort of suffix.
4845  unsigned AvailableFeatures = getAvailableFeatures();
4846  applyMnemonicAliases(Name, AvailableFeatures);
4847
4848  // First check for the ARM-specific .req directive.
4849  if (Parser.getTok().is(AsmToken::Identifier) &&
4850      Parser.getTok().getIdentifier() == ".req") {
4851    parseDirectiveReq(Name, NameLoc);
4852    // We always return 'error' for this, as we're done with this
4853    // statement and don't need to match the instruction.
4854    return true;
4855  }
4856
4857  // Create the leading tokens for the mnemonic, split by '.' characters.
4858  size_t Start = 0, Next = Name.find('.');
4859  StringRef Mnemonic = Name.slice(Start, Next);
4860
4861  // Split out the predication code and carry setting flag from the mnemonic.
4862  unsigned PredicationCode;
4863  unsigned ProcessorIMod;
4864  bool CarrySetting;
4865  StringRef ITMask;
4866  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4867                           ProcessorIMod, ITMask);
4868
4869  // In Thumb1, only the branch (B) instruction can be predicated.
4870  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4871    Parser.EatToEndOfStatement();
4872    return Error(NameLoc, "conditional execution not supported in Thumb1");
4873  }
4874
4875  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4876
4877  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4878  // is the mask as it will be for the IT encoding if the conditional
4879  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4880  // where the conditional bit0 is zero, the instruction post-processing
4881  // will adjust the mask accordingly.
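  // e.g. for "ittet" the ITMask is "tet" and the loop below computes
  // Mask = 0b1011: one bit per 't'/'e' ('t' ==> '1') followed by a
  // terminating '1'.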
4882  if (Mnemonic == "it") {
4883    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4884    if (ITMask.size() > 3) {
4885      Parser.EatToEndOfStatement();
4886      return Error(Loc, "too many conditions on IT instruction");
4887    }
4888    unsigned Mask = 8;
4889    for (unsigned i = ITMask.size(); i != 0; --i) {
4890      char pos = ITMask[i - 1];
4891      if (pos != 't' && pos != 'e') {
4892        Parser.EatToEndOfStatement();
4893        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4894      }
4895      Mask >>= 1;
4896      if (ITMask[i - 1] == 't')
4897        Mask |= 8;
4898    }
4899    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4900  }
4901
4902  // FIXME: This is all a pretty gross hack. We should automatically handle
4903  // optional operands like this via tblgen.
4904
4905  // Next, add the CCOut and ConditionCode operands, if needed.
4906  //
4907  // For mnemonics which can ever incorporate a carry setting bit or predication
4908  // code, our matching model involves us always generating CCOut and
4909  // ConditionCode operands to match the mnemonic "as written" and then we let
4910  // the matcher deal with finding the right instruction or generating an
4911  // appropriate error.
4912  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4913  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4914
4915  // If we had a carry-set on an instruction that can't do that, issue an
4916  // error.
4917  if (!CanAcceptCarrySet && CarrySetting) {
4918    Parser.EatToEndOfStatement();
4919    return Error(NameLoc, "instruction '" + Mnemonic +
4920                 "' can not set flags, but 's' suffix specified");
4921  }
4922  // If we had a predication code on an instruction that can't do that, issue an
4923  // error.
4924  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4925    Parser.EatToEndOfStatement();
4926    return Error(NameLoc, "instruction '" + Mnemonic +
4927                 "' is not predicable, but condition code specified");
4928  }
4929
4930  // Add the carry setting operand, if necessary.
4931  if (CanAcceptCarrySet) {
4932    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4933    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4934                                               Loc));
4935  }
4936
4937  // Add the predication code operand, if necessary.
4938  if (CanAcceptPredicationCode) {
4939    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4940                                      CarrySetting);
4941    Operands.push_back(ARMOperand::CreateCondCode(
4942                         ARMCC::CondCodes(PredicationCode), Loc));
4943  }
4944
4945  // Add the processor imod operand, if necessary.
4946  if (ProcessorIMod) {
4947    Operands.push_back(ARMOperand::CreateImm(
4948          MCConstantExpr::Create(ProcessorIMod, getContext()),
4949                                 NameLoc, NameLoc));
4950  }
4951
4952  // Add the remaining tokens in the mnemonic.
4953  while (Next != StringRef::npos) {
4954    Start = Next;
4955    Next = Name.find('.', Start + 1);
4956    StringRef ExtraToken = Name.slice(Start, Next);
4957
4958    // Some NEON instructions have an optional datatype suffix that is
4959    // completely ignored. Check for that.
4960    if (isDataTypeToken(ExtraToken) &&
4961        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4962      continue;
4963
4964    if (ExtraToken != ".n") {
4965      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4966      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4967    }
4968  }
4969
4970  // Read the remaining operands.
4971  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4972    // Read the first operand.
4973    if (parseOperand(Operands, Mnemonic)) {
4974      Parser.EatToEndOfStatement();
4975      return true;
4976    }
4977
4978    while (getLexer().is(AsmToken::Comma)) {
4979      Parser.Lex();  // Eat the comma.
4980
4981      // Parse and remember the operand.
4982      if (parseOperand(Operands, Mnemonic)) {
4983        Parser.EatToEndOfStatement();
4984        return true;
4985      }
4986    }
4987  }
4988
4989  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4990    SMLoc Loc = getLexer().getLoc();
4991    Parser.EatToEndOfStatement();
4992    return Error(Loc, "unexpected token in argument list");
4993  }
4994
4995  Parser.Lex(); // Consume the EndOfStatement
4996
4997  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4998  // do and don't have a cc_out optional-def operand. With some spot-checks
4999  // of the operand list, we can figure out which variant we're trying to
5000  // parse and adjust accordingly before actually matching. We shouldn't ever
5001  // try to remove a cc_out operand that was explicitly set on the
5002  // mnemonic, of course (CarrySetting == true). Reason #317 why the
5003  // table-driven matcher doesn't fit well with the ARM instruction set.
5004  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5005    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5006    Operands.erase(Operands.begin() + 1);
5007    delete Op;
5008  }
5009
5010  // ARM mode 'blx' needs special handling, as the register operand version
5011  // is predicable, but the label operand version is not. So, we can't rely
5012  // on the Mnemonic based checking to correctly figure out when to put
5013  // a k_CondCode operand in the list. If we're trying to match the label
5014  // version, remove the k_CondCode operand here.
5015  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5016      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5017    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5018    Operands.erase(Operands.begin() + 1);
5019    delete Op;
5020  }
5021
5022  // The vector-compare-to-zero instructions have a literal token "#0" at
5023  // the end that comes to here as an immediate operand. Convert it to a
5024  // token to play nicely with the matcher.
5025  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5026      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5027      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5028    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5029    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5030    if (CE && CE->getValue() == 0) {
5031      Operands.erase(Operands.begin() + 5);
5032      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5033      delete Op;
5034    }
5035  }
5036  // VCMP{E} does the same thing, but with a different operand count.
5037  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5038      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5039    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5040    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5041    if (CE && CE->getValue() == 0) {
5042      Operands.erase(Operands.begin() + 4);
5043      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5044      delete Op;
5045    }
5046  }
5047  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5048  // end. Convert it to a token here. Take care not to convert those
5049  // that should hit the Thumb2 encoding.
5050  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5051      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5052      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5053      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5054    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5055    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5056    if (CE && CE->getValue() == 0 &&
5057        (isThumbOne() ||
5058         // The cc_out operand matches the IT block.
5059         ((inITBlock() != CarrySetting) &&
5060         // Neither register operand is a high register.
5061         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5062          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5063      Operands.erase(Operands.begin() + 5);
5064      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5065      delete Op;
5066    }
5067  }
5068
5069  return false;
5070}
5071
5072// Validate context-sensitive operand constraints.
5073
5074// Return 'true' if the register list contains non-low GPR registers (other
5075// than the permitted HiReg), 'false' otherwise. If Reg is in the register
5076// list, set 'containsReg' to true.
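// e.g. for "pop {r0, r8}" this reports a non-low register so the caller can
// diagnose it (or switch to the wide Thumb2 encoding when available).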
5077static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5078                                 unsigned HiReg, bool &containsReg) {
5079  containsReg = false;
5080  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5081    unsigned OpReg = Inst.getOperand(i).getReg();
5082    if (OpReg == Reg)
5083      containsReg = true;
5084    // Anything other than a low register isn't legal here.
5085    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5086      return true;
5087  }
5088  return false;
5089}
5090
5091// Check if the specified register is in the register list of the inst,
5092// starting at the indicated operand number.
5093static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5094  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5095    unsigned OpReg = Inst.getOperand(i).getReg();
5096    if (OpReg == Reg)
5097      return true;
5098  }
5099  return false;
5100}
5101
5102// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5103// the ARMInsts array) instead. Getting that here requires awkward
5104// API changes, though. Better way?
5105namespace llvm {
5106extern const MCInstrDesc ARMInsts[];
5107}
5108static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5109  return ARMInsts[Opcode];
5110}
5111
5112// FIXME: We would really like to be able to tablegen'erate this.
5113bool ARMAsmParser::
5114validateInstruction(MCInst &Inst,
5115                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5116  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5117  SMLoc Loc = Operands[0]->getStartLoc();
5118  // Check the IT block state first.
5119  // NOTE: BKPT instruction has the interesting property of being
5120  // allowed in IT blocks, but not being predicable.  It just always
5121  // executes.
5122  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5123      Inst.getOpcode() != ARM::BKPT) {
5124    unsigned bit = 1;
5125    if (ITState.FirstCond)
5126      ITState.FirstCond = false;
5127    else
5128      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5129    // The instruction must be predicable.
5130    if (!MCID.isPredicable())
5131      return Error(Loc, "instructions in IT block must be predicable");
5132    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5133    unsigned ITCond = bit ? ITState.Cond :
5134      ARMCC::getOppositeCondition(ITState.Cond);
5135    if (Cond != ITCond) {
5136      // Find the condition code Operand to get its SMLoc information.
5137      SMLoc CondLoc;
5138      for (unsigned i = 1; i < Operands.size(); ++i)
5139        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5140          CondLoc = Operands[i]->getStartLoc();
5141      return Error(CondLoc, "incorrect condition in IT block; got '" +
5142                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5143                   "', but expected '" +
5144                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5145    }
5146  // Check for non-'al' condition codes outside of the IT block.
5147  } else if (isThumbTwo() && MCID.isPredicable() &&
5148             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5149             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5150             Inst.getOpcode() != ARM::t2B)
5151    return Error(Loc, "predicated instructions must be in IT block");
5152
5153  switch (Inst.getOpcode()) {
5154  case ARM::LDRD:
5155  case ARM::LDRD_PRE:
5156  case ARM::LDRD_POST:
5157  case ARM::LDREXD: {
5158    // Rt2 must be Rt + 1.
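    // e.g. "ldrd r0, r1, [r2]" passes this check, while "ldrd r0, r2, [r4]"
    // is rejected because the destination registers are not sequential.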
5159    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5160    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5161    if (Rt2 != Rt + 1)
5162      return Error(Operands[3]->getStartLoc(),
5163                   "destination operands must be sequential");
5164    return false;
5165  }
5166  case ARM::STRD: {
5167    // Rt2 must be Rt + 1.
5168    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5169    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5170    if (Rt2 != Rt + 1)
5171      return Error(Operands[3]->getStartLoc(),
5172                   "source operands must be sequential");
5173    return false;
5174  }
5175  case ARM::STRD_PRE:
5176  case ARM::STRD_POST:
5177  case ARM::STREXD: {
5178    // Rt2 must be Rt + 1.
5179    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5180    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5181    if (Rt2 != Rt + 1)
5182      return Error(Operands[3]->getStartLoc(),
5183                   "source operands must be sequential");
5184    return false;
5185  }
5186  case ARM::SBFX:
5187  case ARM::UBFX: {
5188    // width must be in range [1, 32-lsb]
5189    unsigned lsb = Inst.getOperand(2).getImm();
5190    unsigned widthm1 = Inst.getOperand(3).getImm();
5191    if (widthm1 >= 32 - lsb)
5192      return Error(Operands[5]->getStartLoc(),
5193                   "bitfield width must be in range [1,32-lsb]");
5194    return false;
5195  }
5196  case ARM::tLDMIA: {
5197    // If we're parsing Thumb2, the .w variant is available and handles
5198    // most cases that are normally illegal for a Thumb1 LDM
5199    // instruction. We'll make the transformation in processInstruction()
5200    // if necessary.
5201    //
5202    // Thumb LDM instructions are writeback iff the base register is not
5203    // in the register list.
5204    unsigned Rn = Inst.getOperand(0).getReg();
5205    bool hasWritebackToken =
5206      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5207       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5208    bool listContainsBase;
5209    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5210      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5211                   "registers must be in range r0-r7");
5212    // If we should have writeback, then there should be a '!' token.
5213    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5214      return Error(Operands[2]->getStartLoc(),
5215                   "writeback operator '!' expected");
5216    // If we should not have writeback, there must not be a '!'. This is
5217    // true even for the 32-bit wide encodings.
5218    if (listContainsBase && hasWritebackToken)
5219      return Error(Operands[3]->getStartLoc(),
5220                   "writeback operator '!' not allowed when base register "
5221                   "in register list");
5222
5223    break;
5224  }
5225  case ARM::t2LDMIA_UPD: {
5226    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5227      return Error(Operands[4]->getStartLoc(),
5228                   "writeback operator '!' not allowed when base register "
5229                   "in register list");
5230    break;
5231  }
5232  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5233  // so only issue a diagnostic for Thumb1. The instructions will be
5234  // switched to the t2 encodings in processInstruction() if necessary.
5235  case ARM::tPOP: {
5236    bool listContainsBase;
5237    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5238        !isThumbTwo())
5239      return Error(Operands[2]->getStartLoc(),
5240                   "registers must be in range r0-r7 or pc");
5241    break;
5242  }
5243  case ARM::tPUSH: {
5244    bool listContainsBase;
5245    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5246        !isThumbTwo())
5247      return Error(Operands[2]->getStartLoc(),
5248                   "registers must be in range r0-r7 or lr");
5249    break;
5250  }
5251  case ARM::tSTMIA_UPD: {
5252    bool listContainsBase;
5253    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5254      return Error(Operands[4]->getStartLoc(),
5255                   "registers must be in range r0-r7");
5256    break;
5257  }
5258  }
5259
5260  return false;
5261}
5262
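// Map an assembly-only VSTn pseudo opcode to the real instruction, setting
// Spacing to the register-list stride: 1 for the 'd' variants (consecutive
// D registers) and 2 for the 'q' variants (every other D register).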
5263static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5264  switch(Opc) {
5265  default: llvm_unreachable("unexpected opcode!");
5266  // VST1LN
5267  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5268  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5269  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5270  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5271  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5272  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5273  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5274  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5275  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5276
5277  // VST2LN
5278  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5279  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5280  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5281  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5282  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5283
5284  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5285  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5286  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5287  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5288  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5289
5290  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5291  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5292  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5293  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5294  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5295
5296  // VST3LN
5297  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5298  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5299  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5300  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5301  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5302  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5303  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5304  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5305  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5306  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5307  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5308  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5309  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5310  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5311  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5312
5313  // VST3
5314  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5315  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5316  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5317  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5318  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5319  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5320  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5321  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5322  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5323  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5324  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5325  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5326  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5327  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5328  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5329  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5330  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5331  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5332
5333  // VST4LN
5334  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5335  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5336  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5337  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5338  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5339  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5340  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5341  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5342  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5343  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5344  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5345  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5346  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5347  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5348  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5349
5350  // VST4
5351  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5352  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5353  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5354  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5355  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5356  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5357  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5358  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5359  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5360  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5361  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5362  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5363  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5364  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5365  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5366  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5367  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5368  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5369  }
5370}
5371
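// Same idea as getRealVSTOpcode, but for the VLDn pseudos: Spacing again
// gives the D-register stride of the list.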
5372static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5373  switch(Opc) {
5374  default: llvm_unreachable("unexpected opcode!");
5375  // VLD1LN
5376  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5377  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5378  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5379  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5380  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5381  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5382  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5383  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5384  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5385
5386  // VLD2LN
5387  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5388  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5389  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5390  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5391  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5392  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5393  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5394  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5395  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5396  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5397  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5398  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5399  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5400  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5401  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5402
5403  // VLD3DUP
5404  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5405  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5406  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5407  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5408  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5409  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5410  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5411  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5412  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5413  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5414  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5415  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5416  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5417  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5418  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5419  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5420  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5421  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5422
5423  // VLD3LN
5424  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5425  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5426  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5427  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5428  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5429  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5430  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5431  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5432  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5433  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5434  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5435  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5436  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5437  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5438  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5439
5440  // VLD3
5441  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5442  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5443  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5444  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5445  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5446  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5447  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5448  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5449  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5450  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5451  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5452  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5453  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5454  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5455  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5456  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5457  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5458  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5459
5460  // VLD4LN
5461  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5462  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5463  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5464  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5465  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5466  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5467  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5468  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5469  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5470  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5471  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5472  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5473  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5474  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5475  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5476
5477  // VLD4DUP
5478  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5479  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5480  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5481  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5482  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5483  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5484  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5485  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5486  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5487  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5488  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5489  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5490  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5491  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5492  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5493  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5494  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5495  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5496
5497  // VLD4
5498  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5499  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5500  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5501  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5502  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5503  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5504  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5505  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5506  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5507  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5508  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5509  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5510  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5511  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5512  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5513  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5514  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5515  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5516  }
5517}
5518
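// processInstruction - Post-process a successfully matched instruction,
// expanding assembler pseudo-opcodes (e.g. the NEON VLD/VST "..._Asm_..."
// aliases and the MOV shift aliases handled below) into the real MCInst
// encodings. Returns true if Inst was rewritten; callers can loop on the
// result so that individual transformations chain off each other.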
5519bool ARMAsmParser::
5520processInstruction(MCInst &Inst,
5521                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5522  switch (Inst.getOpcode()) {
5523  // Aliases for alternate PC+imm syntax of LDR instructions.
5524  case ARM::t2LDRpcrel:
5525    Inst.setOpcode(ARM::t2LDRpci);
5526    return true;
5527  case ARM::t2LDRBpcrel:
5528    Inst.setOpcode(ARM::t2LDRBpci);
5529    return true;
5530  case ARM::t2LDRHpcrel:
5531    Inst.setOpcode(ARM::t2LDRHpci);
5532    return true;
5533  case ARM::t2LDRSBpcrel:
5534    Inst.setOpcode(ARM::t2LDRSBpci);
5535    return true;
5536  case ARM::t2LDRSHpcrel:
5537    Inst.setOpcode(ARM::t2LDRSHpci);
5538    return true;
5539  // Handle NEON VST complex aliases.
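  // Each "..._Asm_..." VST case below rewrites the matched pseudo-instruction
  // into the real VST opcode (obtained from getRealVSTOpcode, which also
  // reports the register Spacing: 1 for consecutive D registers, 2 for the
  // every-other-register / Q forms). Operands are reordered to match the real
  // instruction: writeback and base-address operands first, then the register
  // list expanded from Vd using Spacing, then the lane and predicate operands.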
5540  case ARM::VST1LNdWB_register_Asm_8:
5541  case ARM::VST1LNdWB_register_Asm_16:
5542  case ARM::VST1LNdWB_register_Asm_32: {
5543    MCInst TmpInst;
5544    // Shuffle the operands around so the lane index operand is in the
5545    // right place.
5546    unsigned Spacing;
5547    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5548    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5549    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5550    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5551    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5552    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5553    TmpInst.addOperand(Inst.getOperand(1)); // lane
5554    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5555    TmpInst.addOperand(Inst.getOperand(6));
5556    Inst = TmpInst;
5557    return true;
5558  }
5559
5560  case ARM::VST2LNdWB_register_Asm_8:
5561  case ARM::VST2LNdWB_register_Asm_16:
5562  case ARM::VST2LNdWB_register_Asm_32:
5563  case ARM::VST2LNqWB_register_Asm_16:
5564  case ARM::VST2LNqWB_register_Asm_32: {
5565    MCInst TmpInst;
5566    // Shuffle the operands around so the lane index operand is in the
5567    // right place.
5568    unsigned Spacing;
5569    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5570    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5571    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5572    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5573    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5574    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5575    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5576                                            Spacing));
5577    TmpInst.addOperand(Inst.getOperand(1)); // lane
5578    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5579    TmpInst.addOperand(Inst.getOperand(6));
5580    Inst = TmpInst;
5581    return true;
5582  }
5583
5584  case ARM::VST3LNdWB_register_Asm_8:
5585  case ARM::VST3LNdWB_register_Asm_16:
5586  case ARM::VST3LNdWB_register_Asm_32:
5587  case ARM::VST3LNqWB_register_Asm_16:
5588  case ARM::VST3LNqWB_register_Asm_32: {
5589    MCInst TmpInst;
5590    // Shuffle the operands around so the lane index operand is in the
5591    // right place.
5592    unsigned Spacing;
5593    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5594    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5595    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5596    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5597    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5598    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5599    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5600                                            Spacing));
5601    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5602                                            Spacing * 2));
5603    TmpInst.addOperand(Inst.getOperand(1)); // lane
5604    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5605    TmpInst.addOperand(Inst.getOperand(6));
5606    Inst = TmpInst;
5607    return true;
5608  }
5609
5610  case ARM::VST4LNdWB_register_Asm_8:
5611  case ARM::VST4LNdWB_register_Asm_16:
5612  case ARM::VST4LNdWB_register_Asm_32:
5613  case ARM::VST4LNqWB_register_Asm_16:
5614  case ARM::VST4LNqWB_register_Asm_32: {
5615    MCInst TmpInst;
5616    // Shuffle the operands around so the lane index operand is in the
5617    // right place.
5618    unsigned Spacing;
5619    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5620    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5621    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5622    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5623    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5624    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5625    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5626                                            Spacing));
5627    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5628                                            Spacing * 2));
5629    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5630                                            Spacing * 3));
5631    TmpInst.addOperand(Inst.getOperand(1)); // lane
5632    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5633    TmpInst.addOperand(Inst.getOperand(6));
5634    Inst = TmpInst;
5635    return true;
5636  }
5637
5638  case ARM::VST1LNdWB_fixed_Asm_8:
5639  case ARM::VST1LNdWB_fixed_Asm_16:
5640  case ARM::VST1LNdWB_fixed_Asm_32: {
5641    MCInst TmpInst;
5642    // Shuffle the operands around so the lane index operand is in the
5643    // right place.
5644    unsigned Spacing;
5645    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5646    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5647    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5648    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5649    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5650    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5651    TmpInst.addOperand(Inst.getOperand(1)); // lane
5652    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5653    TmpInst.addOperand(Inst.getOperand(5));
5654    Inst = TmpInst;
5655    return true;
5656  }
5657
5658  case ARM::VST2LNdWB_fixed_Asm_8:
5659  case ARM::VST2LNdWB_fixed_Asm_16:
5660  case ARM::VST2LNdWB_fixed_Asm_32:
5661  case ARM::VST2LNqWB_fixed_Asm_16:
5662  case ARM::VST2LNqWB_fixed_Asm_32: {
5663    MCInst TmpInst;
5664    // Shuffle the operands around so the lane index operand is in the
5665    // right place.
5666    unsigned Spacing;
5667    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5668    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5669    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5670    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5671    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5672    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5673    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5674                                            Spacing));
5675    TmpInst.addOperand(Inst.getOperand(1)); // lane
5676    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5677    TmpInst.addOperand(Inst.getOperand(5));
5678    Inst = TmpInst;
5679    return true;
5680  }
5681
5682  case ARM::VST3LNdWB_fixed_Asm_8:
5683  case ARM::VST3LNdWB_fixed_Asm_16:
5684  case ARM::VST3LNdWB_fixed_Asm_32:
5685  case ARM::VST3LNqWB_fixed_Asm_16:
5686  case ARM::VST3LNqWB_fixed_Asm_32: {
5687    MCInst TmpInst;
5688    // Shuffle the operands around so the lane index operand is in the
5689    // right place.
5690    unsigned Spacing;
5691    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5692    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5693    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5694    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5695    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5696    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5697    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5698                                            Spacing));
5699    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5700                                            Spacing * 2));
5701    TmpInst.addOperand(Inst.getOperand(1)); // lane
5702    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5703    TmpInst.addOperand(Inst.getOperand(5));
5704    Inst = TmpInst;
5705    return true;
5706  }
5707
5708  case ARM::VST4LNdWB_fixed_Asm_8:
5709  case ARM::VST4LNdWB_fixed_Asm_16:
5710  case ARM::VST4LNdWB_fixed_Asm_32:
5711  case ARM::VST4LNqWB_fixed_Asm_16:
5712  case ARM::VST4LNqWB_fixed_Asm_32: {
5713    MCInst TmpInst;
5714    // Shuffle the operands around so the lane index operand is in the
5715    // right place.
5716    unsigned Spacing;
5717    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5718    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5719    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5720    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5721    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5722    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5723    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5724                                            Spacing));
5725    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5726                                            Spacing * 2));
5727    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5728                                            Spacing * 3));
5729    TmpInst.addOperand(Inst.getOperand(1)); // lane
5730    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5731    TmpInst.addOperand(Inst.getOperand(5));
5732    Inst = TmpInst;
5733    return true;
5734  }
5735
5736  case ARM::VST1LNdAsm_8:
5737  case ARM::VST1LNdAsm_16:
5738  case ARM::VST1LNdAsm_32: {
5739    MCInst TmpInst;
5740    // Shuffle the operands around so the lane index operand is in the
5741    // right place.
5742    unsigned Spacing;
5743    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5744    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5745    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5746    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5747    TmpInst.addOperand(Inst.getOperand(1)); // lane
5748    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5749    TmpInst.addOperand(Inst.getOperand(5));
5750    Inst = TmpInst;
5751    return true;
5752  }
5753
5754  case ARM::VST2LNdAsm_8:
5755  case ARM::VST2LNdAsm_16:
5756  case ARM::VST2LNdAsm_32:
5757  case ARM::VST2LNqAsm_16:
5758  case ARM::VST2LNqAsm_32: {
5759    MCInst TmpInst;
5760    // Shuffle the operands around so the lane index operand is in the
5761    // right place.
5762    unsigned Spacing;
5763    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5764    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5765    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5766    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5767    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5768                                            Spacing));
5769    TmpInst.addOperand(Inst.getOperand(1)); // lane
5770    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5771    TmpInst.addOperand(Inst.getOperand(5));
5772    Inst = TmpInst;
5773    return true;
5774  }
5775
5776  case ARM::VST3LNdAsm_8:
5777  case ARM::VST3LNdAsm_16:
5778  case ARM::VST3LNdAsm_32:
5779  case ARM::VST3LNqAsm_16:
5780  case ARM::VST3LNqAsm_32: {
5781    MCInst TmpInst;
5782    // Shuffle the operands around so the lane index operand is in the
5783    // right place.
5784    unsigned Spacing;
5785    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5786    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5787    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5788    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5789    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5790                                            Spacing));
5791    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5792                                            Spacing * 2));
5793    TmpInst.addOperand(Inst.getOperand(1)); // lane
5794    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5795    TmpInst.addOperand(Inst.getOperand(5));
5796    Inst = TmpInst;
5797    return true;
5798  }
5799
5800  case ARM::VST4LNdAsm_8:
5801  case ARM::VST4LNdAsm_16:
5802  case ARM::VST4LNdAsm_32:
5803  case ARM::VST4LNqAsm_16:
5804  case ARM::VST4LNqAsm_32: {
5805    MCInst TmpInst;
5806    // Shuffle the operands around so the lane index operand is in the
5807    // right place.
5808    unsigned Spacing;
5809    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5810    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5811    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5812    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5813    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5814                                            Spacing));
5815    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5816                                            Spacing * 2));
5817    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5818                                            Spacing * 3));
5819    TmpInst.addOperand(Inst.getOperand(1)); // lane
5820    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5821    TmpInst.addOperand(Inst.getOperand(5));
5822    Inst = TmpInst;
5823    return true;
5824  }
5825
5826  // Handle NEON VLD complex aliases.
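  // The VLD cases follow the same pattern as the VST aliases above, except
  // that the expanded destination register list comes first and, for the
  // lane forms, the same registers are repeated later as the tied source
  // operands ("Tied operand src (== Vd)").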
5827  case ARM::VLD1LNdWB_register_Asm_8:
5828  case ARM::VLD1LNdWB_register_Asm_16:
5829  case ARM::VLD1LNdWB_register_Asm_32: {
5830    MCInst TmpInst;
5831    // Shuffle the operands around so the lane index operand is in the
5832    // right place.
5833    unsigned Spacing;
5834    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5835    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5836    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5837    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5838    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5839    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5840    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5841    TmpInst.addOperand(Inst.getOperand(1)); // lane
5842    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5843    TmpInst.addOperand(Inst.getOperand(6));
5844    Inst = TmpInst;
5845    return true;
5846  }
5847
5848  case ARM::VLD2LNdWB_register_Asm_8:
5849  case ARM::VLD2LNdWB_register_Asm_16:
5850  case ARM::VLD2LNdWB_register_Asm_32:
5851  case ARM::VLD2LNqWB_register_Asm_16:
5852  case ARM::VLD2LNqWB_register_Asm_32: {
5853    MCInst TmpInst;
5854    // Shuffle the operands around so the lane index operand is in the
5855    // right place.
5856    unsigned Spacing;
5857    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5858    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5859    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5860                                            Spacing));
5861    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5862    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5863    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5864    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5865    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5866    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5867                                            Spacing));
5868    TmpInst.addOperand(Inst.getOperand(1)); // lane
5869    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5870    TmpInst.addOperand(Inst.getOperand(6));
5871    Inst = TmpInst;
5872    return true;
5873  }
5874
5875  case ARM::VLD3LNdWB_register_Asm_8:
5876  case ARM::VLD3LNdWB_register_Asm_16:
5877  case ARM::VLD3LNdWB_register_Asm_32:
5878  case ARM::VLD3LNqWB_register_Asm_16:
5879  case ARM::VLD3LNqWB_register_Asm_32: {
5880    MCInst TmpInst;
5881    // Shuffle the operands around so the lane index operand is in the
5882    // right place.
5883    unsigned Spacing;
5884    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5885    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5886    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5887                                            Spacing));
5888    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5889                                            Spacing * 2));
5890    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5891    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5892    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5893    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5894    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5895    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5896                                            Spacing));
5897    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5898                                            Spacing * 2));
5899    TmpInst.addOperand(Inst.getOperand(1)); // lane
5900    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5901    TmpInst.addOperand(Inst.getOperand(6));
5902    Inst = TmpInst;
5903    return true;
5904  }
5905
5906  case ARM::VLD4LNdWB_register_Asm_8:
5907  case ARM::VLD4LNdWB_register_Asm_16:
5908  case ARM::VLD4LNdWB_register_Asm_32:
5909  case ARM::VLD4LNqWB_register_Asm_16:
5910  case ARM::VLD4LNqWB_register_Asm_32: {
5911    MCInst TmpInst;
5912    // Shuffle the operands around so the lane index operand is in the
5913    // right place.
5914    unsigned Spacing;
5915    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5916    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5917    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5918                                            Spacing));
5919    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5920                                            Spacing * 2));
5921    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5922                                            Spacing * 3));
5923    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5924    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5925    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5926    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5927    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5928    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5929                                            Spacing));
5930    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5931                                            Spacing * 2));
5932    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5933                                            Spacing * 3));
5934    TmpInst.addOperand(Inst.getOperand(1)); // lane
5935    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5936    TmpInst.addOperand(Inst.getOperand(6));
5937    Inst = TmpInst;
5938    return true;
5939  }
5940
5941  case ARM::VLD1LNdWB_fixed_Asm_8:
5942  case ARM::VLD1LNdWB_fixed_Asm_16:
5943  case ARM::VLD1LNdWB_fixed_Asm_32: {
5944    MCInst TmpInst;
5945    // Shuffle the operands around so the lane index operand is in the
5946    // right place.
5947    unsigned Spacing;
5948    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5949    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5950    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5951    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5952    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5953    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5954    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5955    TmpInst.addOperand(Inst.getOperand(1)); // lane
5956    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5957    TmpInst.addOperand(Inst.getOperand(5));
5958    Inst = TmpInst;
5959    return true;
5960  }
5961
5962  case ARM::VLD2LNdWB_fixed_Asm_8:
5963  case ARM::VLD2LNdWB_fixed_Asm_16:
5964  case ARM::VLD2LNdWB_fixed_Asm_32:
5965  case ARM::VLD2LNqWB_fixed_Asm_16:
5966  case ARM::VLD2LNqWB_fixed_Asm_32: {
5967    MCInst TmpInst;
5968    // Shuffle the operands around so the lane index operand is in the
5969    // right place.
5970    unsigned Spacing;
5971    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5972    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5973    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5974                                            Spacing));
5975    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5976    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5977    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5978    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5979    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5980    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5981                                            Spacing));
5982    TmpInst.addOperand(Inst.getOperand(1)); // lane
5983    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5984    TmpInst.addOperand(Inst.getOperand(5));
5985    Inst = TmpInst;
5986    return true;
5987  }
5988
5989  case ARM::VLD3LNdWB_fixed_Asm_8:
5990  case ARM::VLD3LNdWB_fixed_Asm_16:
5991  case ARM::VLD3LNdWB_fixed_Asm_32:
5992  case ARM::VLD3LNqWB_fixed_Asm_16:
5993  case ARM::VLD3LNqWB_fixed_Asm_32: {
5994    MCInst TmpInst;
5995    // Shuffle the operands around so the lane index operand is in the
5996    // right place.
5997    unsigned Spacing;
5998    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5999    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6000    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6001                                            Spacing));
6002    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6003                                            Spacing * 2));
6004    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6005    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6006    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6007    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6008    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6009    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6010                                            Spacing));
6011    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6012                                            Spacing * 2));
6013    TmpInst.addOperand(Inst.getOperand(1)); // lane
6014    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6015    TmpInst.addOperand(Inst.getOperand(5));
6016    Inst = TmpInst;
6017    return true;
6018  }
6019
6020  case ARM::VLD4LNdWB_fixed_Asm_8:
6021  case ARM::VLD4LNdWB_fixed_Asm_16:
6022  case ARM::VLD4LNdWB_fixed_Asm_32:
6023  case ARM::VLD4LNqWB_fixed_Asm_16:
6024  case ARM::VLD4LNqWB_fixed_Asm_32: {
6025    MCInst TmpInst;
6026    // Shuffle the operands around so the lane index operand is in the
6027    // right place.
6028    unsigned Spacing;
6029    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6030    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6031    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6032                                            Spacing));
6033    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6034                                            Spacing * 2));
6035    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6036                                            Spacing * 3));
6037    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6038    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6039    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6040    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6041    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6042    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6043                                            Spacing));
6044    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6045                                            Spacing * 2));
6046    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6047                                            Spacing * 3));
6048    TmpInst.addOperand(Inst.getOperand(1)); // lane
6049    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6050    TmpInst.addOperand(Inst.getOperand(5));
6051    Inst = TmpInst;
6052    return true;
6053  }
6054
6055  case ARM::VLD1LNdAsm_8:
6056  case ARM::VLD1LNdAsm_16:
6057  case ARM::VLD1LNdAsm_32: {
6058    MCInst TmpInst;
6059    // Shuffle the operands around so the lane index operand is in the
6060    // right place.
6061    unsigned Spacing;
6062    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6063    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6064    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6065    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6066    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6067    TmpInst.addOperand(Inst.getOperand(1)); // lane
6068    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6069    TmpInst.addOperand(Inst.getOperand(5));
6070    Inst = TmpInst;
6071    return true;
6072  }
6073
6074  case ARM::VLD2LNdAsm_8:
6075  case ARM::VLD2LNdAsm_16:
6076  case ARM::VLD2LNdAsm_32:
6077  case ARM::VLD2LNqAsm_16:
6078  case ARM::VLD2LNqAsm_32: {
6079    MCInst TmpInst;
6080    // Shuffle the operands around so the lane index operand is in the
6081    // right place.
6082    unsigned Spacing;
6083    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6084    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6085    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6086                                            Spacing));
6087    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6088    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6089    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6090    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6091                                            Spacing));
6092    TmpInst.addOperand(Inst.getOperand(1)); // lane
6093    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6094    TmpInst.addOperand(Inst.getOperand(5));
6095    Inst = TmpInst;
6096    return true;
6097  }
6098
6099  case ARM::VLD3LNdAsm_8:
6100  case ARM::VLD3LNdAsm_16:
6101  case ARM::VLD3LNdAsm_32:
6102  case ARM::VLD3LNqAsm_16:
6103  case ARM::VLD3LNqAsm_32: {
6104    MCInst TmpInst;
6105    // Shuffle the operands around so the lane index operand is in the
6106    // right place.
6107    unsigned Spacing;
6108    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6109    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6110    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6111                                            Spacing));
6112    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6113                                            Spacing * 2));
6114    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6115    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6116    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6117    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6118                                            Spacing));
6119    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6120                                            Spacing * 2));
6121    TmpInst.addOperand(Inst.getOperand(1)); // lane
6122    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6123    TmpInst.addOperand(Inst.getOperand(5));
6124    Inst = TmpInst;
6125    return true;
6126  }
6127
6128  case ARM::VLD4LNdAsm_8:
6129  case ARM::VLD4LNdAsm_16:
6130  case ARM::VLD4LNdAsm_32:
6131  case ARM::VLD4LNqAsm_16:
6132  case ARM::VLD4LNqAsm_32: {
6133    MCInst TmpInst;
6134    // Shuffle the operands around so the lane index operand is in the
6135    // right place.
6136    unsigned Spacing;
6137    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6138    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6139    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6140                                            Spacing));
6141    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6142                                            Spacing * 2));
6143    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6144                                            Spacing * 3));
6145    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6146    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6147    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6148    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6149                                            Spacing));
6150    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6151                                            Spacing * 2));
6152    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6153                                            Spacing * 3));
6154    TmpInst.addOperand(Inst.getOperand(1)); // lane
6155    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6156    TmpInst.addOperand(Inst.getOperand(5));
6157    Inst = TmpInst;
6158    return true;
6159  }
6160
6161  // VLD3DUP single 3-element structure to all lanes instructions.
6162  case ARM::VLD3DUPdAsm_8:
6163  case ARM::VLD3DUPdAsm_16:
6164  case ARM::VLD3DUPdAsm_32:
6165  case ARM::VLD3DUPqAsm_8:
6166  case ARM::VLD3DUPqAsm_16:
6167  case ARM::VLD3DUPqAsm_32: {
6168    MCInst TmpInst;
6169    unsigned Spacing;
6170    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6171    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6172    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6173                                            Spacing));
6174    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6175                                            Spacing * 2));
6176    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6177    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6178    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6179    TmpInst.addOperand(Inst.getOperand(4));
6180    Inst = TmpInst;
6181    return true;
6182  }
6183
6184  case ARM::VLD3DUPdWB_fixed_Asm_8:
6185  case ARM::VLD3DUPdWB_fixed_Asm_16:
6186  case ARM::VLD3DUPdWB_fixed_Asm_32:
6187  case ARM::VLD3DUPqWB_fixed_Asm_8:
6188  case ARM::VLD3DUPqWB_fixed_Asm_16:
6189  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6190    MCInst TmpInst;
6191    unsigned Spacing;
6192    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6193    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6194    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6195                                            Spacing));
6196    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6197                                            Spacing * 2));
6198    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6199    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6200    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6201    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6202    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6203    TmpInst.addOperand(Inst.getOperand(4));
6204    Inst = TmpInst;
6205    return true;
6206  }
6207
6208  case ARM::VLD3DUPdWB_register_Asm_8:
6209  case ARM::VLD3DUPdWB_register_Asm_16:
6210  case ARM::VLD3DUPdWB_register_Asm_32:
6211  case ARM::VLD3DUPqWB_register_Asm_8:
6212  case ARM::VLD3DUPqWB_register_Asm_16:
6213  case ARM::VLD3DUPqWB_register_Asm_32: {
6214    MCInst TmpInst;
6215    unsigned Spacing;
6216    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6217    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6218    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6219                                            Spacing));
6220    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6221                                            Spacing * 2));
6222    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6223    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6224    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6225    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6226    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6227    TmpInst.addOperand(Inst.getOperand(5));
6228    Inst = TmpInst;
6229    return true;
6230  }
6231
6232  // VLD3 multiple 3-element structure instructions.
6233  case ARM::VLD3dAsm_8:
6234  case ARM::VLD3dAsm_16:
6235  case ARM::VLD3dAsm_32:
6236  case ARM::VLD3qAsm_8:
6237  case ARM::VLD3qAsm_16:
6238  case ARM::VLD3qAsm_32: {
6239    MCInst TmpInst;
6240    unsigned Spacing;
6241    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6242    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6243    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6244                                            Spacing));
6245    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6246                                            Spacing * 2));
6247    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6248    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6249    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6250    TmpInst.addOperand(Inst.getOperand(4));
6251    Inst = TmpInst;
6252    return true;
6253  }
6254
6255  case ARM::VLD3dWB_fixed_Asm_8:
6256  case ARM::VLD3dWB_fixed_Asm_16:
6257  case ARM::VLD3dWB_fixed_Asm_32:
6258  case ARM::VLD3qWB_fixed_Asm_8:
6259  case ARM::VLD3qWB_fixed_Asm_16:
6260  case ARM::VLD3qWB_fixed_Asm_32: {
6261    MCInst TmpInst;
6262    unsigned Spacing;
6263    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6264    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6265    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6266                                            Spacing));
6267    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6268                                            Spacing * 2));
6269    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6270    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6271    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6272    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6273    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6274    TmpInst.addOperand(Inst.getOperand(4));
6275    Inst = TmpInst;
6276    return true;
6277  }
6278
6279  case ARM::VLD3dWB_register_Asm_8:
6280  case ARM::VLD3dWB_register_Asm_16:
6281  case ARM::VLD3dWB_register_Asm_32:
6282  case ARM::VLD3qWB_register_Asm_8:
6283  case ARM::VLD3qWB_register_Asm_16:
6284  case ARM::VLD3qWB_register_Asm_32: {
6285    MCInst TmpInst;
6286    unsigned Spacing;
6287    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6288    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6289    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6290                                            Spacing));
6291    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6292                                            Spacing * 2));
6293    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6294    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6295    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6296    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6297    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6298    TmpInst.addOperand(Inst.getOperand(5));
6299    Inst = TmpInst;
6300    return true;
6301  }
6302
6303  // VLD4DUP single 4-element structure to all lanes instructions.
6304  case ARM::VLD4DUPdAsm_8:
6305  case ARM::VLD4DUPdAsm_16:
6306  case ARM::VLD4DUPdAsm_32:
6307  case ARM::VLD4DUPqAsm_8:
6308  case ARM::VLD4DUPqAsm_16:
6309  case ARM::VLD4DUPqAsm_32: {
6310    MCInst TmpInst;
6311    unsigned Spacing;
6312    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6313    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6314    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6315                                            Spacing));
6316    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6317                                            Spacing * 2));
6318    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6319                                            Spacing * 3));
6320    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6321    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6322    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6323    TmpInst.addOperand(Inst.getOperand(4));
6324    Inst = TmpInst;
6325    return true;
6326  }
6327
6328  case ARM::VLD4DUPdWB_fixed_Asm_8:
6329  case ARM::VLD4DUPdWB_fixed_Asm_16:
6330  case ARM::VLD4DUPdWB_fixed_Asm_32:
6331  case ARM::VLD4DUPqWB_fixed_Asm_8:
6332  case ARM::VLD4DUPqWB_fixed_Asm_16:
6333  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6334    MCInst TmpInst;
6335    unsigned Spacing;
6336    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6337    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6338    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6339                                            Spacing));
6340    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6341                                            Spacing * 2));
6342    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6343                                            Spacing * 3));
6344    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6345    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6346    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6347    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6348    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6349    TmpInst.addOperand(Inst.getOperand(4));
6350    Inst = TmpInst;
6351    return true;
6352  }
6353
6354  case ARM::VLD4DUPdWB_register_Asm_8:
6355  case ARM::VLD4DUPdWB_register_Asm_16:
6356  case ARM::VLD4DUPdWB_register_Asm_32:
6357  case ARM::VLD4DUPqWB_register_Asm_8:
6358  case ARM::VLD4DUPqWB_register_Asm_16:
6359  case ARM::VLD4DUPqWB_register_Asm_32: {
6360    MCInst TmpInst;
6361    unsigned Spacing;
6362    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6363    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6364    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6365                                            Spacing));
6366    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6367                                            Spacing * 2));
6368    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6369                                            Spacing * 3));
6370    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6371    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6372    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6373    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6374    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6375    TmpInst.addOperand(Inst.getOperand(5));
6376    Inst = TmpInst;
6377    return true;
6378  }
6379
6380  // VLD4 multiple 4-element structure instructions.
6381  case ARM::VLD4dAsm_8:
6382  case ARM::VLD4dAsm_16:
6383  case ARM::VLD4dAsm_32:
6384  case ARM::VLD4qAsm_8:
6385  case ARM::VLD4qAsm_16:
6386  case ARM::VLD4qAsm_32: {
6387    MCInst TmpInst;
6388    unsigned Spacing;
6389    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6390    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6391    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6392                                            Spacing));
6393    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6394                                            Spacing * 2));
6395    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6396                                            Spacing * 3));
6397    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6398    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6399    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6400    TmpInst.addOperand(Inst.getOperand(4));
6401    Inst = TmpInst;
6402    return true;
6403  }
6404
6405  case ARM::VLD4dWB_fixed_Asm_8:
6406  case ARM::VLD4dWB_fixed_Asm_16:
6407  case ARM::VLD4dWB_fixed_Asm_32:
6408  case ARM::VLD4qWB_fixed_Asm_8:
6409  case ARM::VLD4qWB_fixed_Asm_16:
6410  case ARM::VLD4qWB_fixed_Asm_32: {
6411    MCInst TmpInst;
6412    unsigned Spacing;
6413    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6414    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6415    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6416                                            Spacing));
6417    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6418                                            Spacing * 2));
6419    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6420                                            Spacing * 3));
6421    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6422    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6423    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6424    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6425    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6426    TmpInst.addOperand(Inst.getOperand(4));
6427    Inst = TmpInst;
6428    return true;
6429  }
6430
6431  case ARM::VLD4dWB_register_Asm_8:
6432  case ARM::VLD4dWB_register_Asm_16:
6433  case ARM::VLD4dWB_register_Asm_32:
6434  case ARM::VLD4qWB_register_Asm_8:
6435  case ARM::VLD4qWB_register_Asm_16:
6436  case ARM::VLD4qWB_register_Asm_32: {
6437    MCInst TmpInst;
6438    unsigned Spacing;
6439    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6440    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6441    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6442                                            Spacing));
6443    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6444                                            Spacing * 2));
6445    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6446                                            Spacing * 3));
6447    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6448    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6449    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6450    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6451    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6452    TmpInst.addOperand(Inst.getOperand(5));
6453    Inst = TmpInst;
6454    return true;
6455  }
6456
6457  // VST3 multiple 3-element structure instructions.
6458  case ARM::VST3dAsm_8:
6459  case ARM::VST3dAsm_16:
6460  case ARM::VST3dAsm_32:
6461  case ARM::VST3qAsm_8:
6462  case ARM::VST3qAsm_16:
6463  case ARM::VST3qAsm_32: {
6464    MCInst TmpInst;
6465    unsigned Spacing;
6466    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6467    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6468    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6469    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6470    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6471                                            Spacing));
6472    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6473                                            Spacing * 2));
6474    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6475    TmpInst.addOperand(Inst.getOperand(4));
6476    Inst = TmpInst;
6477    return true;
6478  }
6479
6480  case ARM::VST3dWB_fixed_Asm_8:
6481  case ARM::VST3dWB_fixed_Asm_16:
6482  case ARM::VST3dWB_fixed_Asm_32:
6483  case ARM::VST3qWB_fixed_Asm_8:
6484  case ARM::VST3qWB_fixed_Asm_16:
6485  case ARM::VST3qWB_fixed_Asm_32: {
6486    MCInst TmpInst;
6487    unsigned Spacing;
6488    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6489    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6490    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6491    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6492    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6493    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6494    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6495                                            Spacing));
6496    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6497                                            Spacing * 2));
6498    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6499    TmpInst.addOperand(Inst.getOperand(4));
6500    Inst = TmpInst;
6501    return true;
6502  }
6503
6504  case ARM::VST3dWB_register_Asm_8:
6505  case ARM::VST3dWB_register_Asm_16:
6506  case ARM::VST3dWB_register_Asm_32:
6507  case ARM::VST3qWB_register_Asm_8:
6508  case ARM::VST3qWB_register_Asm_16:
6509  case ARM::VST3qWB_register_Asm_32: {
6510    MCInst TmpInst;
6511    unsigned Spacing;
6512    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6513    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6514    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6515    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6516    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6517    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6518    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6519                                            Spacing));
6520    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6521                                            Spacing * 2));
6522    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6523    TmpInst.addOperand(Inst.getOperand(5));
6524    Inst = TmpInst;
6525    return true;
6526  }
6527
6528  // VST4 multiple 4-element structure instructions.
6529  case ARM::VST4dAsm_8:
6530  case ARM::VST4dAsm_16:
6531  case ARM::VST4dAsm_32:
6532  case ARM::VST4qAsm_8:
6533  case ARM::VST4qAsm_16:
6534  case ARM::VST4qAsm_32: {
6535    MCInst TmpInst;
6536    unsigned Spacing;
6537    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6538    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6539    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6540    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6541    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6542                                            Spacing));
6543    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6544                                            Spacing * 2));
6545    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6546                                            Spacing * 3));
6547    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6548    TmpInst.addOperand(Inst.getOperand(4));
6549    Inst = TmpInst;
6550    return true;
6551  }
6552
6553  case ARM::VST4dWB_fixed_Asm_8:
6554  case ARM::VST4dWB_fixed_Asm_16:
6555  case ARM::VST4dWB_fixed_Asm_32:
6556  case ARM::VST4qWB_fixed_Asm_8:
6557  case ARM::VST4qWB_fixed_Asm_16:
6558  case ARM::VST4qWB_fixed_Asm_32: {
6559    MCInst TmpInst;
6560    unsigned Spacing;
6561    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6562    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6563    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6564    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6565    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6566    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6567    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6568                                            Spacing));
6569    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6570                                            Spacing * 2));
6571    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6572                                            Spacing * 3));
6573    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6574    TmpInst.addOperand(Inst.getOperand(4));
6575    Inst = TmpInst;
6576    return true;
6577  }
6578
6579  case ARM::VST4dWB_register_Asm_8:
6580  case ARM::VST4dWB_register_Asm_16:
6581  case ARM::VST4dWB_register_Asm_32:
6582  case ARM::VST4qWB_register_Asm_8:
6583  case ARM::VST4qWB_register_Asm_16:
6584  case ARM::VST4qWB_register_Asm_32: {
6585    MCInst TmpInst;
6586    unsigned Spacing;
6587    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6588    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6589    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6590    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6591    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6592    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6593    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6594                                            Spacing));
6595    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6596                                            Spacing * 2));
6597    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6598                                            Spacing * 3));
6599    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6600    TmpInst.addOperand(Inst.getOperand(5));
6601    Inst = TmpInst;
6602    return true;
6603  }
6604
6605  // Handle the Thumb2 mode MOV complex aliases.
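  // "mov rd, rn, <shift> ..." in Thumb2 is expanded into the corresponding
  // shift instruction. A 16-bit (narrow) encoding is selected when the
  // registers involved are low registers, Rd matches Rn where required, and
  // the flag-setting behavior agrees with the surrounding IT-block context;
  // otherwise the 32-bit t2 encoding is used.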
6606  case ARM::t2MOVsr:
6607  case ARM::t2MOVSsr: {
6608    // Which instruction to expand to depends on whether the register
6609    // operands are low registers and whether the flag-setting behavior
6610    // (CCOut) matches being inside or outside an IT block.
6611    bool isNarrow = false;
6612    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6613        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6614        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6615        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6616        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6617      isNarrow = true;
6618    MCInst TmpInst;
6619    unsigned newOpc;
6620    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6621    default: llvm_unreachable("unexpected opcode!");
6622    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6623    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6624    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6625    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6626    }
6627    TmpInst.setOpcode(newOpc);
6628    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6629    if (isNarrow)
6630      TmpInst.addOperand(MCOperand::CreateReg(
6631          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6632    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6633    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6634    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6635    TmpInst.addOperand(Inst.getOperand(5));
6636    if (!isNarrow)
6637      TmpInst.addOperand(MCOperand::CreateReg(
6638          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6639    Inst = TmpInst;
6640    return true;
6641  }
6642  case ARM::t2MOVsi:
6643  case ARM::t2MOVSsi: {
6644    // Which instruction to expand to depends on whether the register
6645    // operands are low registers and whether the flag-setting behavior
6646    // (CCOut) matches being inside or outside an IT block.
6647    bool isNarrow = false;
6648    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6649        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6650        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6651      isNarrow = true;
6652    MCInst TmpInst;
6653    unsigned newOpc;
6654    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6655    default: llvm_unreachable("unexpected opcode!");
6656    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6657    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6658    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6659    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6660    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6661    }
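        // An asr/lsr shift amount of 32 is encoded as an immediate of 0.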
6662    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6663    if (Amount == 32) Amount = 0;
6664    TmpInst.setOpcode(newOpc);
6665    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6666    if (isNarrow)
6667      TmpInst.addOperand(MCOperand::CreateReg(
6668          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6669    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6670    if (newOpc != ARM::t2RRX)
6671      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6672    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6673    TmpInst.addOperand(Inst.getOperand(4));
6674    if (!isNarrow)
6675      TmpInst.addOperand(MCOperand::CreateReg(
6676          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6677    Inst = TmpInst;
6678    return true;
6679  }
6680  // Handle the ARM mode MOV complex aliases.
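      // E.g., 'asr r0, r1, r2' in ARM mode is really 'mov r0, r1, asr r2' (MOVsr).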
6681  case ARM::ASRr:
6682  case ARM::LSRr:
6683  case ARM::LSLr:
6684  case ARM::RORr: {
6685    ARM_AM::ShiftOpc ShiftTy;
6686    switch(Inst.getOpcode()) {
6687    default: llvm_unreachable("unexpected opcode!");
6688    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6689    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6690    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6691    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6692    }
6693    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6694    MCInst TmpInst;
6695    TmpInst.setOpcode(ARM::MOVsr);
6696    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6697    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6698    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6699    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6700    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6701    TmpInst.addOperand(Inst.getOperand(4));
6702    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6703    Inst = TmpInst;
6704    return true;
6705  }
6706  case ARM::ASRi:
6707  case ARM::LSRi:
6708  case ARM::LSLi:
6709  case ARM::RORi: {
6710    ARM_AM::ShiftOpc ShiftTy;
6711    switch(Inst.getOpcode()) {
6712    default: llvm_unreachable("unexpected opcode!");
6713    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6714    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6715    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6716    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6717    }
6718    // A shift by zero is a plain MOVr, not a MOVsi.
6719    unsigned Amt = Inst.getOperand(2).getImm();
6720    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6721    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6722    MCInst TmpInst;
6723    TmpInst.setOpcode(Opc);
6724    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6725    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6726    if (Opc == ARM::MOVsi)
6727      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6728    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6729    TmpInst.addOperand(Inst.getOperand(4));
6730    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6731    Inst = TmpInst;
6732    return true;
6733  }
6734  case ARM::RRXi: {
6735    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6736    MCInst TmpInst;
6737    TmpInst.setOpcode(ARM::MOVsi);
6738    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6739    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6740    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6741    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6742    TmpInst.addOperand(Inst.getOperand(3));
6743    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6744    Inst = TmpInst;
6745    return true;
6746  }
6747  case ARM::t2LDMIA_UPD: {
6748    // If this is a load of a single register, then we should use
6749    // a post-indexed LDR instruction instead, per the ARM ARM.
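        // E.g., 'ldmia.w r0!, {r1}' becomes 'ldr.w r1, [r0], #4'.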
6750    if (Inst.getNumOperands() != 5)
6751      return false;
6752    MCInst TmpInst;
6753    TmpInst.setOpcode(ARM::t2LDR_POST);
6754    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6755    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6756    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6757    TmpInst.addOperand(MCOperand::CreateImm(4));
6758    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6759    TmpInst.addOperand(Inst.getOperand(3));
6760    Inst = TmpInst;
6761    return true;
6762  }
6763  case ARM::t2STMDB_UPD: {
6764    // If this is a store of a single register, then we should use
6765    // a pre-indexed STR instruction instead, per the ARM ARM.
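        // E.g., 'stmdb.w r0!, {r1}' becomes 'str.w r1, [r0, #-4]!'.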
6766    if (Inst.getNumOperands() != 5)
6767      return false;
6768    MCInst TmpInst;
6769    TmpInst.setOpcode(ARM::t2STR_PRE);
6770    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6771    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6772    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6773    TmpInst.addOperand(MCOperand::CreateImm(-4));
6774    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6775    TmpInst.addOperand(Inst.getOperand(3));
6776    Inst = TmpInst;
6777    return true;
6778  }
6779  case ARM::LDMIA_UPD:
6780    // If this is a load of a single register via a 'pop', then we should use
6781    // a post-indexed LDR instruction instead, per the ARM ARM.
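        // E.g., 'pop {r3}' becomes 'ldr r3, [sp], #4'.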
6782    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6783        Inst.getNumOperands() == 5) {
6784      MCInst TmpInst;
6785      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6786      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6787      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6788      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6789      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6790      TmpInst.addOperand(MCOperand::CreateImm(4));
6791      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6792      TmpInst.addOperand(Inst.getOperand(3));
6793      Inst = TmpInst;
6794      return true;
6795    }
6796    break;
6797  case ARM::STMDB_UPD:
6798    // If this is a store of a single register via a 'push', then we should use
6799    // a pre-indexed STR instruction instead, per the ARM ARM.
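        // E.g., 'push {r3}' becomes 'str r3, [sp, #-4]!'.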
6800    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6801        Inst.getNumOperands() == 5) {
6802      MCInst TmpInst;
6803      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6804      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6805      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6806      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6807      TmpInst.addOperand(MCOperand::CreateImm(-4));
6808      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6809      TmpInst.addOperand(Inst.getOperand(3));
6810      Inst = TmpInst;
6811    }
6812    break;
6813  case ARM::t2ADDri12:
6814    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6815    // mnemonic was used (not "addw"), encoding T3 is preferred.
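        // E.g., 'add r0, r1, #1' is assembled as t2ADDri, while
        // 'addw r0, r1, #1' stays t2ADDri12.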
6816    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6817        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6818      break;
6819    Inst.setOpcode(ARM::t2ADDri);
6820    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6821    break;
6822  case ARM::t2SUBri12:
6823    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6824    // mnemonic was used (not "subw"), encoding T3 is preferred.
6825    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6826        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6827      break;
6828    Inst.setOpcode(ARM::t2SUBri);
6829    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6830    break;
6831  case ARM::tADDi8:
6832    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6833    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6834    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6835    // to encoding T1 if <Rd> is omitted."
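        // E.g., 'adds r1, r1, #3' (Rd written out) selects tADDi3, while
        // 'adds r1, #3' stays tADDi8.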
6836    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6837      Inst.setOpcode(ARM::tADDi3);
6838      return true;
6839    }
6840    break;
6841  case ARM::tSUBi8:
6842    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6843    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6844    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6845    // to encoding T1 if <Rd> is omitted."
6846    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6847      Inst.setOpcode(ARM::tSUBi3);
6848      return true;
6849    }
6850    break;
6851  case ARM::t2ADDrr: {
6852    // If the destination and first source operand are the same, and
6853    // there's no setting of the flags, use encoding T2 instead of T3.
6854    // Note that this is only for ADD, not SUB. This mirrors the system
6855    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
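        // E.g., 'add r0, r0, r8' can use the 16-bit tADDhirr encoding.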
6856    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6857        Inst.getOperand(5).getReg() != 0 ||
6858        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6859         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6860      break;
6861    MCInst TmpInst;
6862    TmpInst.setOpcode(ARM::tADDhirr);
6863    TmpInst.addOperand(Inst.getOperand(0));
6864    TmpInst.addOperand(Inst.getOperand(0));
6865    TmpInst.addOperand(Inst.getOperand(2));
6866    TmpInst.addOperand(Inst.getOperand(3));
6867    TmpInst.addOperand(Inst.getOperand(4));
6868    Inst = TmpInst;
6869    return true;
6870  }
6871  case ARM::tB:
6872    // A Thumb conditional branch outside of an IT block is a tBcc.
6873    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6874      Inst.setOpcode(ARM::tBcc);
6875      return true;
6876    }
6877    break;
6878  case ARM::t2B:
6879    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6880    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6881      Inst.setOpcode(ARM::t2Bcc);
6882      return true;
6883    }
6884    break;
6885  case ARM::t2Bcc:
6886    // If the conditional is AL or we're in an IT block, we really want t2B.
6887    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6888      Inst.setOpcode(ARM::t2B);
6889      return true;
6890    }
6891    break;
6892  case ARM::tBcc:
6893    // If the conditional is AL, we really want tB.
6894    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6895      Inst.setOpcode(ARM::tB);
6896      return true;
6897    }
6898    break;
6899  case ARM::tLDMIA: {
6900    // If the register list contains any high registers, or if the writeback
6901    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6902    // instead if we're in Thumb2. Otherwise, this should have generated
6903    // an error in validateInstruction().
6904    unsigned Rn = Inst.getOperand(0).getReg();
6905    bool hasWritebackToken =
6906      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6907       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6908    bool listContainsBase;
6909    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6910        (!listContainsBase && !hasWritebackToken) ||
6911        (listContainsBase && hasWritebackToken)) {
6912      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6913      assert (isThumbTwo());
6914      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6915      // If we're switching to the updating version, we need to insert
6916      // the writeback tied operand.
6917      if (hasWritebackToken)
6918        Inst.insert(Inst.begin(),
6919                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6920      return true;
6921    }
6922    break;
6923  }
6924  case ARM::tSTMIA_UPD: {
6925    // If the register list contains any high registers, we need to use
6926    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6927    // should have generated an error in validateInstruction().
6928    unsigned Rn = Inst.getOperand(0).getReg();
6929    bool listContainsBase;
6930    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6931      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6932      assert (isThumbTwo());
6933      Inst.setOpcode(ARM::t2STMIA_UPD);
6934      return true;
6935    }
6936    break;
6937  }
6938  case ARM::tPOP: {
6939    bool listContainsBase;
6940    // If the register list contains any high registers, we need to use
6941    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6942    // should have generated an error in validateInstruction().
6943    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6944      return false;
6945    assert (isThumbTwo());
6946    Inst.setOpcode(ARM::t2LDMIA_UPD);
6947    // Add the base register and writeback operands.
6948    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6949    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6950    return true;
6951  }
6952  case ARM::tPUSH: {
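        // Likewise, if the register list contains any high registers (other
        // than LR, which tPUSH allows), we need the 32-bit t2STMDB_UPD
        // encoding when in Thumb2.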
6953    bool listContainsBase;
6954    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6955      return false;
6956    assert (isThumbTwo());
6957    Inst.setOpcode(ARM::t2STMDB_UPD);
6958    // Add the base register and writeback operands.
6959    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6960    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6961    return true;
6962  }
6963  case ARM::t2MOVi: {
6964    // If we can use the 16-bit encoding and the user didn't explicitly
6965    // request the 32-bit variant, transform it here.
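        // E.g., 'movs r0, #5' outside an IT block (or 'mov r0, #5' inside
        // one) can use the 16-bit tMOVi8 encoding.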
6966    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6967        Inst.getOperand(1).getImm() <= 255 &&
6968        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6969         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6970        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6971        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6972         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6973      // The operands aren't in the same order for tMOVi8...
6974      MCInst TmpInst;
6975      TmpInst.setOpcode(ARM::tMOVi8);
6976      TmpInst.addOperand(Inst.getOperand(0));
6977      TmpInst.addOperand(Inst.getOperand(4));
6978      TmpInst.addOperand(Inst.getOperand(1));
6979      TmpInst.addOperand(Inst.getOperand(2));
6980      TmpInst.addOperand(Inst.getOperand(3));
6981      Inst = TmpInst;
6982      return true;
6983    }
6984    break;
6985  }
6986  case ARM::t2MOVr: {
6987    // If we can use the 16-bit encoding and the user didn't explicitly
6988    // request the 32-bit variant, transform it here.
6989    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6990        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6991        Inst.getOperand(2).getImm() == ARMCC::AL &&
6992        Inst.getOperand(4).getReg() == ARM::CPSR &&
6993        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6994         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6995      // The operands aren't the same for tMOV[S]r... (no cc_out)
6996      MCInst TmpInst;
6997      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6998      TmpInst.addOperand(Inst.getOperand(0));
6999      TmpInst.addOperand(Inst.getOperand(1));
7000      TmpInst.addOperand(Inst.getOperand(2));
7001      TmpInst.addOperand(Inst.getOperand(3));
7002      Inst = TmpInst;
7003      return true;
7004    }
7005    break;
7006  }
7007  case ARM::t2SXTH:
7008  case ARM::t2SXTB:
7009  case ARM::t2UXTH:
7010  case ARM::t2UXTB: {
7011    // If we can use the 16-bit encoding and the user didn't explicitly
7012    // request the 32-bit variant, transform it here.
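        // E.g., 'sxtb r0, r1' with no rotation can use the 16-bit tSXTB
        // encoding.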
7013    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7014        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7015        Inst.getOperand(2).getImm() == 0 &&
7016        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7017         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7018      unsigned NewOpc;
7019      switch (Inst.getOpcode()) {
7020      default: llvm_unreachable("Illegal opcode!");
7021      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7022      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7023      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7024      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7025      }
7026      // The operands aren't the same for thumb1 (no rotate operand).
7027      MCInst TmpInst;
7028      TmpInst.setOpcode(NewOpc);
7029      TmpInst.addOperand(Inst.getOperand(0));
7030      TmpInst.addOperand(Inst.getOperand(1));
7031      TmpInst.addOperand(Inst.getOperand(3));
7032      TmpInst.addOperand(Inst.getOperand(4));
7033      Inst = TmpInst;
7034      return true;
7035    }
7036    break;
7037  }
7038  case ARM::MOVsi: {
7039    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7040    if (SOpc == ARM_AM::rrx) return false;
7041    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7042      // Shifting by zero is accepted as a vanilla 'MOVr'
7043      MCInst TmpInst;
7044      TmpInst.setOpcode(ARM::MOVr);
7045      TmpInst.addOperand(Inst.getOperand(0));
7046      TmpInst.addOperand(Inst.getOperand(1));
7047      TmpInst.addOperand(Inst.getOperand(3));
7048      TmpInst.addOperand(Inst.getOperand(4));
7049      TmpInst.addOperand(Inst.getOperand(5));
7050      Inst = TmpInst;
7051      return true;
7052    }
7053    return false;
7054  }
7055  case ARM::ANDrsi:
7056  case ARM::ORRrsi:
7057  case ARM::EORrsi:
7058  case ARM::BICrsi:
7059  case ARM::SUBrsi:
7060  case ARM::ADDrsi: {
7061    unsigned newOpc;
7062    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7063    if (SOpc == ARM_AM::rrx) return false;
7064    switch (Inst.getOpcode()) {
7065    default: llvm_unreachable("unexpected opcode!");
7066    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7067    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7068    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7069    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7070    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7071    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7072    }
7073    // If the shift is by zero, use the non-shifted instruction definition.
7074    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7075      MCInst TmpInst;
7076      TmpInst.setOpcode(newOpc);
7077      TmpInst.addOperand(Inst.getOperand(0));
7078      TmpInst.addOperand(Inst.getOperand(1));
7079      TmpInst.addOperand(Inst.getOperand(2));
7080      TmpInst.addOperand(Inst.getOperand(4));
7081      TmpInst.addOperand(Inst.getOperand(5));
7082      TmpInst.addOperand(Inst.getOperand(6));
7083      Inst = TmpInst;
7084      return true;
7085    }
7086    return false;
7087  }
7088  case ARM::ITasm:
7089  case ARM::t2IT: {
7090    // In the encoding, a mask bit for any but the first condition means
7091    // 't' when it equals the low bit of the condition code. The parser
7092    // always uses 1 to mean 't', so XOR-toggle the bits if the low bit
7093    // of the condition code is zero. The encoding also expects the low
7094    // bit of the condition to appear as bit 4 of the mask operand, so
7095    // mask that in if needed.
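        // E.g., for 'itet eq' the parser's mask is 0b0110 (1 == 't'); since
        // EQ has a low bit of 0, the condition bits are toggled to give the
        // encoded mask 0b1010.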
7096    MCOperand &MO = Inst.getOperand(1);
7097    unsigned Mask = MO.getImm();
7098    unsigned OrigMask = Mask;
7099    unsigned TZ = CountTrailingZeros_32(Mask);
7100    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7101      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7102      for (unsigned i = 3; i != TZ; --i)
7103        Mask ^= 1 << i;
7104    } else
7105      Mask |= 0x10;
7106    MO.setImm(Mask);
7107
7108    // Set up the IT block state according to the IT instruction we just
7109    // matched.
7110    assert(!inITBlock() && "nested IT blocks?!");
7111    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7112    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7113    ITState.CurPosition = 0;
7114    ITState.FirstCond = true;
7115    break;
7116  }
7117  }
7118  return false;
7119}
7120
7121unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7122  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7123  // suffix depending on whether they're in an IT block or not.
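      // E.g., the 16-bit 'adds r0, r1, r2' is only valid outside an IT block;
      // inside one it must be written without the 's'.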
7124  unsigned Opc = Inst.getOpcode();
7125  const MCInstrDesc &MCID = getInstDesc(Opc);
7126  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7127    assert(MCID.hasOptionalDef() &&
7128           "optionally flag setting instruction missing optional def operand");
7129    assert(MCID.NumOperands == Inst.getNumOperands() &&
7130           "operand count mismatch!");
7131    // Find the optional-def operand (cc_out).
7132    unsigned OpNo;
7133    for (OpNo = 0;
7134         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7135         ++OpNo)
7136      ;
7137    // If we're parsing Thumb1, reject it completely.
7138    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7139      return Match_MnemonicFail;
7140    // If we're parsing Thumb2, which form is legal depends on whether we're
7141    // in an IT block.
7142    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7143        !inITBlock())
7144      return Match_RequiresITBlock;
7145    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7146        inITBlock())
7147      return Match_RequiresNotITBlock;
7148  }
7149  // Some high-register supporting Thumb1 encodings only allow both registers
7150  // to be from r0-r7 when in Thumb2.
7151  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7152           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7153           isARMLowRegister(Inst.getOperand(2).getReg()))
7154    return Match_RequiresThumb2;
7155  // Others only require ARMv6 or later.
7156  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7157           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7158           isARMLowRegister(Inst.getOperand(1).getReg()))
7159    return Match_RequiresV6;
7160  return Match_Success;
7161}
7162
7163bool ARMAsmParser::
7164MatchAndEmitInstruction(SMLoc IDLoc,
7165                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7166                        MCStreamer &Out) {
7167  MCInst Inst;
7168  unsigned ErrorInfo;
7169  unsigned MatchResult;
7170  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7171  switch (MatchResult) {
7172  default: break;
7173  case Match_Success:
7174    // Context sensitive operand constraints aren't handled by the matcher,
7175    // so check them here.
7176    if (validateInstruction(Inst, Operands)) {
7177      // Still progress the IT block, otherwise one wrong condition causes
7178      // nasty cascading errors.
7179      forwardITPosition();
7180      return true;
7181    }
7182
7183    // Some instructions need post-processing to, for example, tweak which
7184    // encoding is selected. Loop on it while changes happen so the
7185    // individual transformations can chain off each other. E.g.,
7186    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7187    while (processInstruction(Inst, Operands))
7188      ;
7189
7190    // Only move forward at the very end so that everything in validate
7191    // and process gets a consistent answer about whether we're in an IT
7192    // block.
7193    forwardITPosition();
7194
7195    // ITasm is an ARM mode pseudo-instruction that just sets the IT block
7196    // state and doesn't actually encode.
7197    if (Inst.getOpcode() == ARM::ITasm)
7198      return false;
7199
7200    Inst.setLoc(IDLoc);
7201    Out.EmitInstruction(Inst);
7202    return false;
7203  case Match_MissingFeature:
7204    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7205    return true;
7206  case Match_InvalidOperand: {
7207    SMLoc ErrorLoc = IDLoc;
7208    if (ErrorInfo != ~0U) {
7209      if (ErrorInfo >= Operands.size())
7210        return Error(IDLoc, "too few operands for instruction");
7211
7212      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7213      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7214    }
7215
7216    return Error(ErrorLoc, "invalid operand for instruction");
7217  }
7218  case Match_MnemonicFail:
7219    return Error(IDLoc, "invalid instruction");
7220  case Match_ConversionFail:
7221    // The converter function will have already emitted a diagnostic.
7222    return true;
7223  case Match_RequiresNotITBlock:
7224    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7225  case Match_RequiresITBlock:
7226    return Error(IDLoc, "instruction only valid inside IT block");
7227  case Match_RequiresV6:
7228    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7229  case Match_RequiresThumb2:
7230    return Error(IDLoc, "instruction variant requires Thumb2");
7231  }
7232
7233  llvm_unreachable("Implement any new match types added!");
7234}
7235
7236/// parseDirective parses the ARM-specific directives.
7237bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7238  StringRef IDVal = DirectiveID.getIdentifier();
7239  if (IDVal == ".word")
7240    return parseDirectiveWord(4, DirectiveID.getLoc());
7241  else if (IDVal == ".thumb")
7242    return parseDirectiveThumb(DirectiveID.getLoc());
7243  else if (IDVal == ".arm")
7244    return parseDirectiveARM(DirectiveID.getLoc());
7245  else if (IDVal == ".thumb_func")
7246    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7247  else if (IDVal == ".code")
7248    return parseDirectiveCode(DirectiveID.getLoc());
7249  else if (IDVal == ".syntax")
7250    return parseDirectiveSyntax(DirectiveID.getLoc());
7251  else if (IDVal == ".unreq")
7252    return parseDirectiveUnreq(DirectiveID.getLoc());
7253  else if (IDVal == ".arch")
7254    return parseDirectiveArch(DirectiveID.getLoc());
7255  else if (IDVal == ".eabi_attribute")
7256    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7257  return true;
7258}
7259
7260/// parseDirectiveWord
7261///  ::= .word [ expression (, expression)* ]
7262bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7263  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7264    for (;;) {
7265      const MCExpr *Value;
7266      if (getParser().ParseExpression(Value))
7267        return true;
7268
7269      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7270
7271      if (getLexer().is(AsmToken::EndOfStatement))
7272        break;
7273
7274      // FIXME: Improve diagnostic.
7275      if (getLexer().isNot(AsmToken::Comma))
7276        return Error(L, "unexpected token in directive");
7277      Parser.Lex();
7278    }
7279  }
7280
7281  Parser.Lex();
7282  return false;
7283}
7284
7285/// parseDirectiveThumb
7286///  ::= .thumb
7287bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7288  if (getLexer().isNot(AsmToken::EndOfStatement))
7289    return Error(L, "unexpected token in directive");
7290  Parser.Lex();
7291
7292  if (!isThumb())
7293    SwitchMode();
7294  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7295  return false;
7296}
7297
7298/// parseDirectiveARM
7299///  ::= .arm
7300bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7301  if (getLexer().isNot(AsmToken::EndOfStatement))
7302    return Error(L, "unexpected token in directive");
7303  Parser.Lex();
7304
7305  if (isThumb())
7306    SwitchMode();
7307  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7308  return false;
7309}
7310
7311/// parseDirectiveThumbFunc
7312///  ::= .thumb_func symbol_name
7313bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7314  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7315  bool isMachO = MAI.hasSubsectionsViaSymbols();
7316  StringRef Name;
7317  bool needFuncName = true;
7318
7319  // Darwin asm has an optional function name after the .thumb_func directive;
7320  // ELF doesn't.
7321  if (isMachO) {
7322    const AsmToken &Tok = Parser.getTok();
7323    if (Tok.isNot(AsmToken::EndOfStatement)) {
7324      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7325        return Error(L, "unexpected token in .thumb_func directive");
7326      Name = Tok.getIdentifier();
7327      Parser.Lex(); // Consume the identifier token.
7328      needFuncName = false;
7329    }
7330  }
7331
7332  if (getLexer().isNot(AsmToken::EndOfStatement))
7333    return Error(L, "unexpected token in directive");
7334
7335  // Eat the end of statement and any blank lines that follow.
7336  while (getLexer().is(AsmToken::EndOfStatement))
7337    Parser.Lex();
7338
7339  // FIXME: assuming function name will be the line following .thumb_func
7340  // We really should be checking the next symbol definition even if there's
7341  // stuff in between.
7342  if (needFuncName) {
7343    Name = Parser.getTok().getIdentifier();
7344  }
7345
7346  // Mark symbol as a thumb symbol.
7347  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7348  getParser().getStreamer().EmitThumbFunc(Func);
7349  return false;
7350}
7351
7352/// parseDirectiveSyntax
7353///  ::= .syntax unified | divided
7354bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7355  const AsmToken &Tok = Parser.getTok();
7356  if (Tok.isNot(AsmToken::Identifier))
7357    return Error(L, "unexpected token in .syntax directive");
7358  StringRef Mode = Tok.getString();
7359  if (Mode == "unified" || Mode == "UNIFIED")
7360    Parser.Lex();
7361  else if (Mode == "divided" || Mode == "DIVIDED")
7362    return Error(L, "'.syntax divided' arm asssembly not supported");
7363  else
7364    return Error(L, "unrecognized syntax mode in .syntax directive");
7365
7366  if (getLexer().isNot(AsmToken::EndOfStatement))
7367    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7368  Parser.Lex();
7369
7370  // TODO tell the MC streamer the mode
7371  // getParser().getStreamer().Emit???();
7372  return false;
7373}
7374
7375/// parseDirectiveCode
7376///  ::= .code 16 | 32
7377bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7378  const AsmToken &Tok = Parser.getTok();
7379  if (Tok.isNot(AsmToken::Integer))
7380    return Error(L, "unexpected token in .code directive");
7381  int64_t Val = Parser.getTok().getIntVal();
7382  if (Val == 16)
7383    Parser.Lex();
7384  else if (Val == 32)
7385    Parser.Lex();
7386  else
7387    return Error(L, "invalid operand to .code directive");
7388
7389  if (getLexer().isNot(AsmToken::EndOfStatement))
7390    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7391  Parser.Lex();
7392
7393  if (Val == 16) {
7394    if (!isThumb())
7395      SwitchMode();
7396    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7397  } else {
7398    if (isThumb())
7399      SwitchMode();
7400    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7401  }
7402
7403  return false;
7404}
7405
7406/// parseDirectiveReq
7407///  ::= name .req registername
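    ///  E.g., 'fp .req r11' makes 'fp' usable as an alias for r11.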
7408bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7409  Parser.Lex(); // Eat the '.req' token.
7410  unsigned Reg;
7411  SMLoc SRegLoc, ERegLoc;
7412  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7413    Parser.EatToEndOfStatement();
7414    return Error(SRegLoc, "register name expected");
7415  }
7416
7417  // Shouldn't be anything else.
7418  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7419    Parser.EatToEndOfStatement();
7420    return Error(Parser.getTok().getLoc(),
7421                 "unexpected input in .req directive.");
7422  }
7423
7424  Parser.Lex(); // Consume the EndOfStatement
7425
7426  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7427    return Error(SRegLoc, "redefinition of '" + Name +
7428                          "' does not match original.");
7429
7430  return false;
7431}
7432
7433/// parseDirectiveUnreq
7434///  ::= .unreq registername
7435bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7436  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7437    Parser.EatToEndOfStatement();
7438    return Error(L, "unexpected input in .unreq directive.");
7439  }
7440  RegisterReqs.erase(Parser.getTok().getIdentifier());
7441  Parser.Lex(); // Eat the identifier.
7442  return false;
7443}
7444
7445/// parseDirectiveArch
7446///  ::= .arch token
7447bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
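      // FIXME: Not yet implemented.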
7448  return true;
7449}
7450
7451/// parseDirectiveEabiAttr
7452///  ::= .eabi_attribute int, int
7453bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
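      // FIXME: Not yet implemented.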
7454  return true;
7455}
7456
7457extern "C" void LLVMInitializeARMAsmLexer();
7458
7459/// Force static initialization.
7460extern "C" void LLVMInitializeARMAsmParser() {
7461  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7462  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7463  LLVMInitializeARMAsmLexer();
7464}
7465
7466#define GET_REGISTER_MATCHER
7467#define GET_MATCHER_IMPLEMENTATION
7468#include "ARMGenAsmMatcher.inc"
7469