ARMAsmParser.cpp revision af9f4bc752292b3282f110c11aeb2a1ffb710bbf
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases registered via the .req directive.
50  StringMap<unsigned> RegisterReqs;
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
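                              //   e.g. a mask of 0b1000 has three trailing
                              //   zeros, so the block holds 4 - 3 = 1
                              //   conditional instruction.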
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // first instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U;}
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
86  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
87
88  int tryParseRegister();
89  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
90  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
93  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
94  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
95  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
96                              unsigned &ShiftAmount);
97  bool parseDirectiveWord(unsigned Size, SMLoc L);
98  bool parseDirectiveThumb(SMLoc L);
99  bool parseDirectiveARM(SMLoc L);
100  bool parseDirectiveThumbFunc(SMLoc L);
101  bool parseDirectiveCode(SMLoc L);
102  bool parseDirectiveSyntax(SMLoc L);
103  bool parseDirectiveReq(StringRef Name, SMLoc L);
104  bool parseDirectiveUnreq(SMLoc L);
105  bool parseDirectiveArch(SMLoc L);
106  bool parseDirectiveEabiAttr(SMLoc L);
107
108  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
109                          bool &CarrySetting, unsigned &ProcessorIMod,
110                          StringRef &ITMask);
111  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
112                             bool &CanAcceptPredicationCode);
113
114  bool isThumb() const {
115    // FIXME: Can tablegen auto-generate this?
116    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
117  }
118  bool isThumbOne() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
120  }
121  bool isThumbTwo() const {
122    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
123  }
124  bool hasV6Ops() const {
125    return STI.getFeatureBits() & ARM::HasV6Ops;
126  }
127  bool hasV7Ops() const {
128    return STI.getFeatureBits() & ARM::HasV7Ops;
129  }
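  // Toggle between ARM and Thumb mode and recompute the available features.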
130  void SwitchMode() {
131    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
132    setAvailableFeatures(FB);
133  }
134  bool isMClass() const {
135    return STI.getFeatureBits() & ARM::FeatureMClass;
136  }
137
138  /// @name Auto-generated Match Functions
139  /// {
140
141#define GET_ASSEMBLER_HEADER
142#include "ARMGenAsmMatcher.inc"
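  // (Declares the TableGen-generated matcher helpers this class relies on,
  //  e.g. ComputeAvailableFeatures and MatchInstructionImpl.)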
143
144  /// }
145
146  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
147  OperandMatchResultTy parseCoprocNumOperand(
148    SmallVectorImpl<MCParsedAsmOperand*>&);
149  OperandMatchResultTy parseCoprocRegOperand(
150    SmallVectorImpl<MCParsedAsmOperand*>&);
151  OperandMatchResultTy parseCoprocOptionOperand(
152    SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseMemBarrierOptOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseProcIFlagsOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseMSRMaskOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
160                                   StringRef Op, int Low, int High);
161  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "lsl", 0, 31);
163  }
164  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
165    return parsePKHImm(O, "asr", 1, 32);
166  }
167  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
176
177  // Asm Match Converter Methods
178  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
179                    const SmallVectorImpl<MCParsedAsmOperand*> &);
180  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
181                    const SmallVectorImpl<MCParsedAsmOperand*> &);
182  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
183                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
184  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
185                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
187                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
189                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
197                             const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
199                             const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
201                             const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
203                             const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
205                  const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
207                  const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
209                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
211                        const SmallVectorImpl<MCParsedAsmOperand*> &);
212  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
213                     const SmallVectorImpl<MCParsedAsmOperand*> &);
214  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
215                        const SmallVectorImpl<MCParsedAsmOperand*> &);
216  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
217                     const SmallVectorImpl<MCParsedAsmOperand*> &);
218  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
219                        const SmallVectorImpl<MCParsedAsmOperand*> &);
220
221  bool validateInstruction(MCInst &Inst,
222                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
223  bool processInstruction(MCInst &Inst,
224                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
225  bool shouldOmitCCOutOperand(StringRef Mnemonic,
226                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
227
228public:
229  enum ARMMatchResultTy {
230    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
231    Match_RequiresNotITBlock,
232    Match_RequiresV6,
233    Match_RequiresThumb2
234  };
235
236  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
237    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
238    MCAsmParserExtension::Initialize(_Parser);
239
240    // Cache the MCRegisterInfo.
241    MRI = &getContext().getRegisterInfo();
242
243    // Initialize the set of available features.
244    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
245
246    // Not in an ITBlock to start with.
247    ITState.CurPosition = ~0U;
248  }
249
250  // Implementation of the MCTargetAsmParser interface:
251  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
252  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
253                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
254  bool ParseDirective(AsmToken DirectiveID);
255
256  unsigned checkTargetMatchPredicate(MCInst &Inst);
257
258  bool MatchAndEmitInstruction(SMLoc IDLoc,
259                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
260                               MCStreamer &Out);
261};
262} // end anonymous namespace
263
264namespace {
265
266/// ARMOperand - Instances of this class represent a parsed ARM machine
267/// instruction.
268class ARMOperand : public MCParsedAsmOperand {
269  enum KindTy {
270    k_CondCode,
271    k_CCOut,
272    k_ITCondMask,
273    k_CoprocNum,
274    k_CoprocReg,
275    k_CoprocOption,
276    k_Immediate,
277    k_MemBarrierOpt,
278    k_Memory,
279    k_PostIndexRegister,
280    k_MSRMask,
281    k_ProcIFlags,
282    k_VectorIndex,
283    k_Register,
284    k_RegisterList,
285    k_DPRRegisterList,
286    k_SPRRegisterList,
287    k_VectorList,
288    k_VectorListAllLanes,
289    k_VectorListIndexed,
290    k_ShiftedRegister,
291    k_ShiftedImmediate,
292    k_ShifterImmediate,
293    k_RotateImmediate,
294    k_BitfieldDescriptor,
295    k_Token
296  } Kind;
297
298  SMLoc StartLoc, EndLoc;
299  SmallVector<unsigned, 8> Registers;
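  // (Registers is kept outside the anonymous union below: SmallVector has
  //  constructors and so cannot be a union member here.)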
300
301  union {
302    struct {
303      ARMCC::CondCodes Val;
304    } CC;
305
306    struct {
307      unsigned Val;
308    } Cop;
309
310    struct {
311      unsigned Val;
312    } CoprocOption;
313
314    struct {
315      unsigned Mask:4;
316    } ITMask;
317
318    struct {
319      ARM_MB::MemBOpt Val;
320    } MBOpt;
321
322    struct {
323      ARM_PROC::IFlags Val;
324    } IFlags;
325
326    struct {
327      unsigned Val;
328    } MMask;
329
330    struct {
331      const char *Data;
332      unsigned Length;
333    } Tok;
334
335    struct {
336      unsigned RegNum;
337    } Reg;
338
339    // A vector register list is a sequential list of 1 to 4 registers.
340    struct {
341      unsigned RegNum;
342      unsigned Count;
343      unsigned LaneIndex;
344      bool isDoubleSpaced;
345    } VectorList;
346
347    struct {
348      unsigned Val;
349    } VectorIndex;
350
351    struct {
352      const MCExpr *Val;
353    } Imm;
354
355    /// Combined record for all forms of ARM address expressions.
356    struct {
357      unsigned BaseRegNum;
358      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
359      // was specified.
360      const MCConstantExpr *OffsetImm;  // Offset immediate value
361      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
362      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
363      unsigned ShiftImm;        // shift for OffsetReg.
364      unsigned Alignment;       // 0 = no alignment specified
365                                // n = alignment in bytes (2, 4, 8, 16, or 32)
366      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
367    } Memory;
368
369    struct {
370      unsigned RegNum;
371      bool isAdd;
372      ARM_AM::ShiftOpc ShiftTy;
373      unsigned ShiftImm;
374    } PostIdxReg;
375
376    struct {
377      bool isASR;
378      unsigned Imm;
379    } ShifterImm;
380    struct {
381      ARM_AM::ShiftOpc ShiftTy;
382      unsigned SrcReg;
383      unsigned ShiftReg;
384      unsigned ShiftImm;
385    } RegShiftedReg;
386    struct {
387      ARM_AM::ShiftOpc ShiftTy;
388      unsigned SrcReg;
389      unsigned ShiftImm;
390    } RegShiftedImm;
391    struct {
392      unsigned Imm;
393    } RotImm;
394    struct {
395      unsigned LSB;
396      unsigned Width;
397    } Bitfield;
398  };
399
400  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
401public:
402  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
403    Kind = o.Kind;
404    StartLoc = o.StartLoc;
405    EndLoc = o.EndLoc;
406    switch (Kind) {
407    case k_CondCode:
408      CC = o.CC;
409      break;
410    case k_ITCondMask:
411      ITMask = o.ITMask;
412      break;
413    case k_Token:
414      Tok = o.Tok;
415      break;
416    case k_CCOut:
417    case k_Register:
418      Reg = o.Reg;
419      break;
420    case k_RegisterList:
421    case k_DPRRegisterList:
422    case k_SPRRegisterList:
423      Registers = o.Registers;
424      break;
425    case k_VectorList:
426    case k_VectorListAllLanes:
427    case k_VectorListIndexed:
428      VectorList = o.VectorList;
429      break;
430    case k_CoprocNum:
431    case k_CoprocReg:
432      Cop = o.Cop;
433      break;
434    case k_CoprocOption:
435      CoprocOption = o.CoprocOption;
436      break;
437    case k_Immediate:
438      Imm = o.Imm;
439      break;
440    case k_MemBarrierOpt:
441      MBOpt = o.MBOpt;
442      break;
443    case k_Memory:
444      Memory = o.Memory;
445      break;
446    case k_PostIndexRegister:
447      PostIdxReg = o.PostIdxReg;
448      break;
449    case k_MSRMask:
450      MMask = o.MMask;
451      break;
452    case k_ProcIFlags:
453      IFlags = o.IFlags;
454      break;
455    case k_ShifterImmediate:
456      ShifterImm = o.ShifterImm;
457      break;
458    case k_ShiftedRegister:
459      RegShiftedReg = o.RegShiftedReg;
460      break;
461    case k_ShiftedImmediate:
462      RegShiftedImm = o.RegShiftedImm;
463      break;
464    case k_RotateImmediate:
465      RotImm = o.RotImm;
466      break;
467    case k_BitfieldDescriptor:
468      Bitfield = o.Bitfield;
469      break;
470    case k_VectorIndex:
471      VectorIndex = o.VectorIndex;
472      break;
473    }
474  }
475
476  /// getStartLoc - Get the location of the first token of this operand.
477  SMLoc getStartLoc() const { return StartLoc; }
478  /// getEndLoc - Get the location of the last token of this operand.
479  SMLoc getEndLoc() const { return EndLoc; }
480
481  ARMCC::CondCodes getCondCode() const {
482    assert(Kind == k_CondCode && "Invalid access!");
483    return CC.Val;
484  }
485
486  unsigned getCoproc() const {
487    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
488    return Cop.Val;
489  }
490
491  StringRef getToken() const {
492    assert(Kind == k_Token && "Invalid access!");
493    return StringRef(Tok.Data, Tok.Length);
494  }
495
496  unsigned getReg() const {
497    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
498    return Reg.RegNum;
499  }
500
501  const SmallVectorImpl<unsigned> &getRegList() const {
502    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
503            Kind == k_SPRRegisterList) && "Invalid access!");
504    return Registers;
505  }
506
507  const MCExpr *getImm() const {
508    assert(isImm() && "Invalid access!");
509    return Imm.Val;
510  }
511
512  unsigned getVectorIndex() const {
513    assert(Kind == k_VectorIndex && "Invalid access!");
514    return VectorIndex.Val;
515  }
516
517  ARM_MB::MemBOpt getMemBarrierOpt() const {
518    assert(Kind == k_MemBarrierOpt && "Invalid access!");
519    return MBOpt.Val;
520  }
521
522  ARM_PROC::IFlags getProcIFlags() const {
523    assert(Kind == k_ProcIFlags && "Invalid access!");
524    return IFlags.Val;
525  }
526
527  unsigned getMSRMask() const {
528    assert(Kind == k_MSRMask && "Invalid access!");
529    return MMask.Val;
530  }
531
532  bool isCoprocNum() const { return Kind == k_CoprocNum; }
533  bool isCoprocReg() const { return Kind == k_CoprocReg; }
534  bool isCoprocOption() const { return Kind == k_CoprocOption; }
535  bool isCondCode() const { return Kind == k_CondCode; }
536  bool isCCOut() const { return Kind == k_CCOut; }
537  bool isITMask() const { return Kind == k_ITCondMask; }
538  bool isITCondCode() const { return Kind == k_CondCode; }
539  bool isImm() const { return Kind == k_Immediate; }
540  bool isFPImm() const {
541    if (!isImm()) return false;
542    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
543    if (!CE) return false;
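    // getFP32Imm returns -1 when the value cannot be encoded as an 8-bit
    // VFP immediate.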
544    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
545    return Val != -1;
546  }
547  bool isFBits16() const {
548    if (!isImm()) return false;
549    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
550    if (!CE) return false;
551    int64_t Value = CE->getValue();
552    return Value >= 0 && Value <= 16;
553  }
554  bool isFBits32() const {
555    if (!isImm()) return false;
556    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
557    if (!CE) return false;
558    int64_t Value = CE->getValue();
559    return Value >= 1 && Value <= 32;
560  }
561  bool isImm8s4() const {
562    if (!isImm()) return false;
563    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
564    if (!CE) return false;
565    int64_t Value = CE->getValue();
566    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
567  }
568  bool isImm0_1020s4() const {
569    if (!isImm()) return false;
570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
571    if (!CE) return false;
572    int64_t Value = CE->getValue();
573    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
574  }
575  bool isImm0_508s4() const {
576    if (!isImm()) return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
581  }
582  bool isImm0_255() const {
583    if (!isImm()) return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = CE->getValue();
587    return Value >= 0 && Value < 256;
588  }
589  bool isImm0_1() const {
590    if (!isImm()) return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return Value >= 0 && Value < 2;
595  }
596  bool isImm0_3() const {
597    if (!isImm()) return false;
598    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
599    if (!CE) return false;
600    int64_t Value = CE->getValue();
601    return Value >= 0 && Value < 4;
602  }
603  bool isImm0_7() const {
604    if (!isImm()) return false;
605    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
606    if (!CE) return false;
607    int64_t Value = CE->getValue();
608    return Value >= 0 && Value < 8;
609  }
610  bool isImm0_15() const {
611    if (!isImm()) return false;
612    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
613    if (!CE) return false;
614    int64_t Value = CE->getValue();
615    return Value >= 0 && Value < 16;
616  }
617  bool isImm0_31() const {
618    if (!isImm()) return false;
619    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
620    if (!CE) return false;
621    int64_t Value = CE->getValue();
622    return Value >= 0 && Value < 32;
623  }
624  bool isImm0_63() const {
625    if (!isImm()) return false;
626    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
627    if (!CE) return false;
628    int64_t Value = CE->getValue();
629    return Value >= 0 && Value < 64;
630  }
631  bool isImm8() const {
632    if (!isImm()) return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 8;
637  }
638  bool isImm16() const {
639    if (!isImm()) return false;
640    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
641    if (!CE) return false;
642    int64_t Value = CE->getValue();
643    return Value == 16;
644  }
645  bool isImm32() const {
646    if (!isImm()) return false;
647    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
648    if (!CE) return false;
649    int64_t Value = CE->getValue();
650    return Value == 32;
651  }
652  bool isShrImm8() const {
653    if (!isImm()) return false;
654    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
655    if (!CE) return false;
656    int64_t Value = CE->getValue();
657    return Value > 0 && Value <= 8;
658  }
659  bool isShrImm16() const {
660    if (!isImm()) return false;
661    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
662    if (!CE) return false;
663    int64_t Value = CE->getValue();
664    return Value > 0 && Value <= 16;
665  }
666  bool isShrImm32() const {
667    if (!isImm()) return false;
668    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
669    if (!CE) return false;
670    int64_t Value = CE->getValue();
671    return Value > 0 && Value <= 32;
672  }
673  bool isShrImm64() const {
674    if (!isImm()) return false;
675    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
676    if (!CE) return false;
677    int64_t Value = CE->getValue();
678    return Value > 0 && Value <= 64;
679  }
680  bool isImm1_7() const {
681    if (!isImm()) return false;
682    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683    if (!CE) return false;
684    int64_t Value = CE->getValue();
685    return Value > 0 && Value < 8;
686  }
687  bool isImm1_15() const {
688    if (!isImm()) return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value > 0 && Value < 16;
693  }
694  bool isImm1_31() const {
695    if (!isImm()) return false;
696    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
697    if (!CE) return false;
698    int64_t Value = CE->getValue();
699    return Value > 0 && Value < 32;
700  }
701  bool isImm1_16() const {
702    if (!isImm()) return false;
703    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704    if (!CE) return false;
705    int64_t Value = CE->getValue();
706    return Value > 0 && Value < 17;
707  }
708  bool isImm1_32() const {
709    if (!isImm()) return false;
710    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
711    if (!CE) return false;
712    int64_t Value = CE->getValue();
713    return Value > 0 && Value < 33;
714  }
715  bool isImm0_32() const {
716    if (!isImm()) return false;
717    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
718    if (!CE) return false;
719    int64_t Value = CE->getValue();
720    return Value >= 0 && Value < 33;
721  }
722  bool isImm0_65535() const {
723    if (!isImm()) return false;
724    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
725    if (!CE) return false;
726    int64_t Value = CE->getValue();
727    return Value >= 0 && Value < 65536;
728  }
729  bool isImm0_65535Expr() const {
730    if (!isImm()) return false;
731    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
732    // If it's not a constant expression, it'll generate a fixup and be
733    // handled later.
734    if (!CE) return true;
735    int64_t Value = CE->getValue();
736    return Value >= 0 && Value < 65536;
737  }
738  bool isImm24bit() const {
739    if (!isImm()) return false;
740    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
741    if (!CE) return false;
742    int64_t Value = CE->getValue();
743    return Value >= 0 && Value <= 0xffffff;
744  }
745  bool isImmThumbSR() const {
746    if (!isImm()) return false;
747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748    if (!CE) return false;
749    int64_t Value = CE->getValue();
750    return Value > 0 && Value < 33;
751  }
752  bool isPKHLSLImm() const {
753    if (!isImm()) return false;
754    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
755    if (!CE) return false;
756    int64_t Value = CE->getValue();
757    return Value >= 0 && Value < 32;
758  }
759  bool isPKHASRImm() const {
760    if (!isImm()) return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    if (!CE) return false;
763    int64_t Value = CE->getValue();
764    return Value > 0 && Value <= 32;
765  }
766  bool isARMSOImm() const {
767    if (!isImm()) return false;
768    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
769    if (!CE) return false;
770    int64_t Value = CE->getValue();
771    return ARM_AM::getSOImmVal(Value) != -1;
772  }
773  bool isARMSOImmNot() const {
774    if (!isImm()) return false;
775    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
776    if (!CE) return false;
777    int64_t Value = CE->getValue();
778    return ARM_AM::getSOImmVal(~Value) != -1;
779  }
780  bool isARMSOImmNeg() const {
781    if (!isImm()) return false;
782    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
783    if (!CE) return false;
784    int64_t Value = CE->getValue();
785    return ARM_AM::getSOImmVal(-Value) != -1;
786  }
787  bool isT2SOImm() const {
788    if (!isImm()) return false;
789    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
790    if (!CE) return false;
791    int64_t Value = CE->getValue();
792    return ARM_AM::getT2SOImmVal(Value) != -1;
793  }
794  bool isT2SOImmNot() const {
795    if (!isImm()) return false;
796    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
797    if (!CE) return false;
798    int64_t Value = CE->getValue();
799    return ARM_AM::getT2SOImmVal(~Value) != -1;
800  }
801  bool isT2SOImmNeg() const {
802    if (!isImm()) return false;
803    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
804    if (!CE) return false;
805    int64_t Value = CE->getValue();
806    return ARM_AM::getT2SOImmVal(-Value) != -1;
807  }
808  bool isSetEndImm() const {
809    if (!isImm()) return false;
810    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
811    if (!CE) return false;
812    int64_t Value = CE->getValue();
813    return Value == 1 || Value == 0;
814  }
815  bool isReg() const { return Kind == k_Register; }
816  bool isRegList() const { return Kind == k_RegisterList; }
817  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
818  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
819  bool isToken() const { return Kind == k_Token; }
820  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
821  bool isMemory() const { return Kind == k_Memory; }
822  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
823  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
824  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
825  bool isRotImm() const { return Kind == k_RotateImmediate; }
826  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
827  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
828  bool isPostIdxReg() const {
829    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
830  }
831  bool isMemNoOffset(bool alignOK = false) const {
832    if (!isMemory())
833      return false;
834    // No offset of any kind.
835    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
836     (alignOK || Memory.Alignment == 0);
837  }
838  bool isMemPCRelImm12() const {
839    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
840      return false;
841    // Base register must be PC.
842    if (Memory.BaseRegNum != ARM::PC)
843      return false;
844    // Immediate offset in range [-4095, 4095].
845    if (!Memory.OffsetImm) return true;
846    int64_t Val = Memory.OffsetImm->getValue();
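    // INT32_MIN is used internally to represent #-0 (see isAM3Offset below).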
847    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
848  }
849  bool isAlignedMemory() const {
850    return isMemNoOffset(true);
851  }
852  bool isAddrMode2() const {
853    if (!isMemory() || Memory.Alignment != 0) return false;
854    // Check for register offset.
855    if (Memory.OffsetRegNum) return true;
856    // Immediate offset in range [-4095, 4095].
857    if (!Memory.OffsetImm) return true;
858    int64_t Val = Memory.OffsetImm->getValue();
859    return Val > -4096 && Val < 4096;
860  }
861  bool isAM2OffsetImm() const {
862    if (!isImm()) return false;
863    // Immediate offset in range [-4095, 4095].
864    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
865    if (!CE) return false;
866    int64_t Val = CE->getValue();
867    return Val > -4096 && Val < 4096;
868  }
869  bool isAddrMode3() const {
870    // If we have an immediate that's not a constant, treat it as a label
871    // reference needing a fixup. If it is a constant, it's something else
872    // and we reject it.
873    if (isImm() && !isa<MCConstantExpr>(getImm()))
874      return true;
875    if (!isMemory() || Memory.Alignment != 0) return false;
876    // No shifts are legal for AM3.
877    if (Memory.ShiftType != ARM_AM::no_shift) return false;
878    // Check for register offset.
879    if (Memory.OffsetRegNum) return true;
880    // Immediate offset in range [-255, 255].
881    if (!Memory.OffsetImm) return true;
882    int64_t Val = Memory.OffsetImm->getValue();
883    return Val > -256 && Val < 256;
884  }
885  bool isAM3Offset() const {
886    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
887      return false;
888    if (Kind == k_PostIndexRegister)
889      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
890    // Immediate offset in range [-255, 255].
891    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
892    if (!CE) return false;
893    int64_t Val = CE->getValue();
894    // Special case, #-0 is INT32_MIN.
895    return (Val > -256 && Val < 256) || Val == INT32_MIN;
896  }
897  bool isAddrMode5() const {
898    // If we have an immediate that's not a constant, treat it as a label
899    // reference needing a fixup. If it is a constant, it's something else
900    // and we reject it.
901    if (isImm() && !isa<MCConstantExpr>(getImm()))
902      return true;
903    if (!isMemory() || Memory.Alignment != 0) return false;
904    // Check for register offset.
905    if (Memory.OffsetRegNum) return false;
906    // Immediate offset in range [-1020, 1020] and a multiple of 4.
907    if (!Memory.OffsetImm) return true;
908    int64_t Val = Memory.OffsetImm->getValue();
909    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
910      Val == INT32_MIN;
911  }
912  bool isMemTBB() const {
913    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
914        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
915      return false;
916    return true;
917  }
918  bool isMemTBH() const {
919    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
920        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
921        Memory.Alignment != 0)
922      return false;
923    return true;
924  }
925  bool isMemRegOffset() const {
926    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
927      return false;
928    return true;
929  }
930  bool isT2MemRegOffset() const {
931    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
932        Memory.Alignment != 0)
933      return false;
934    // Only lsl #{0, 1, 2, 3} allowed.
935    if (Memory.ShiftType == ARM_AM::no_shift)
936      return true;
937    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
938      return false;
939    return true;
940  }
941  bool isMemThumbRR() const {
942    // Thumb reg+reg addressing is simple. Just two registers, a base and
943    // an offset. No shifts, negations or any other complicating factors.
944    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
945        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
946      return false;
947    return isARMLowRegister(Memory.BaseRegNum) &&
948      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
949  }
950  bool isMemThumbRIs4() const {
951    if (!isMemory() || Memory.OffsetRegNum != 0 ||
952        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
953      return false;
954    // Immediate offset, multiple of 4 in range [0, 124].
955    if (!Memory.OffsetImm) return true;
956    int64_t Val = Memory.OffsetImm->getValue();
957    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
958  }
959  bool isMemThumbRIs2() const {
960    if (!isMemory() || Memory.OffsetRegNum != 0 ||
961        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
962      return false;
963    // Immediate offset, multiple of 2 in range [0, 62].
964    if (!Memory.OffsetImm) return true;
965    int64_t Val = Memory.OffsetImm->getValue();
966    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
967  }
968  bool isMemThumbRIs1() const {
969    if (!isMemory() || Memory.OffsetRegNum != 0 ||
970        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
971      return false;
972    // Immediate offset in range [0, 31].
973    if (!Memory.OffsetImm) return true;
974    int64_t Val = Memory.OffsetImm->getValue();
975    return Val >= 0 && Val <= 31;
976  }
977  bool isMemThumbSPI() const {
978    if (!isMemory() || Memory.OffsetRegNum != 0 ||
979        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
980      return false;
981    // Immediate offset, multiple of 4 in range [0, 1020].
982    if (!Memory.OffsetImm) return true;
983    int64_t Val = Memory.OffsetImm->getValue();
984    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
985  }
986  bool isMemImm8s4Offset() const {
987    // If we have an immediate that's not a constant, treat it as a label
988    // reference needing a fixup. If it is a constant, it's something else
989    // and we reject it.
990    if (isImm() && !isa<MCConstantExpr>(getImm()))
991      return true;
992    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
993      return false;
994    // Immediate offset a multiple of 4 in range [-1020, 1020].
995    if (!Memory.OffsetImm) return true;
996    int64_t Val = Memory.OffsetImm->getValue();
997    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
998  }
999  bool isMemImm0_1020s4Offset() const {
1000    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1001      return false;
1002    // Immediate offset a multiple of 4 in range [0, 1020].
1003    if (!Memory.OffsetImm) return true;
1004    int64_t Val = Memory.OffsetImm->getValue();
1005    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1006  }
1007  bool isMemImm8Offset() const {
1008    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1009      return false;
1010    // Base reg of PC isn't allowed for these encodings.
1011    if (Memory.BaseRegNum == ARM::PC) return false;
1012    // Immediate offset in range [-255, 255].
1013    if (!Memory.OffsetImm) return true;
1014    int64_t Val = Memory.OffsetImm->getValue();
1015    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1016  }
1017  bool isMemPosImm8Offset() const {
1018    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1019      return false;
1020    // Immediate offset in range [0, 255].
1021    if (!Memory.OffsetImm) return true;
1022    int64_t Val = Memory.OffsetImm->getValue();
1023    return Val >= 0 && Val < 256;
1024  }
1025  bool isMemNegImm8Offset() const {
1026    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1027      return false;
1028    // Base reg of PC isn't allowed for these encodings.
1029    if (Memory.BaseRegNum == ARM::PC) return false;
1030    // Immediate offset in range [-255, -1].
1031    if (!Memory.OffsetImm) return false;
1032    int64_t Val = Memory.OffsetImm->getValue();
1033    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1034  }
1035  bool isMemUImm12Offset() const {
1036    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1037      return false;
1038    // Immediate offset in range [0, 4095].
1039    if (!Memory.OffsetImm) return true;
1040    int64_t Val = Memory.OffsetImm->getValue();
1041    return (Val >= 0 && Val < 4096);
1042  }
1043  bool isMemImm12Offset() const {
1044    // If we have an immediate that's not a constant, treat it as a label
1045    // reference needing a fixup. If it is a constant, it's something else
1046    // and we reject it.
1047    if (isImm() && !isa<MCConstantExpr>(getImm()))
1048      return true;
1049
1050    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1051      return false;
1052    // Immediate offset in range [-4095, 4095].
1053    if (!Memory.OffsetImm) return true;
1054    int64_t Val = Memory.OffsetImm->getValue();
1055    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1056  }
1057  bool isPostIdxImm8() const {
1058    if (!isImm()) return false;
1059    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1060    if (!CE) return false;
1061    int64_t Val = CE->getValue();
1062    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1063  }
1064  bool isPostIdxImm8s4() const {
1065    if (!isImm()) return false;
1066    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1067    if (!CE) return false;
1068    int64_t Val = CE->getValue();
1069    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1070      (Val == INT32_MIN);
1071  }
1072
1073  bool isMSRMask() const { return Kind == k_MSRMask; }
1074  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1075
1076  // NEON operands.
1077  bool isSingleSpacedVectorList() const {
1078    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1079  }
1080  bool isDoubleSpacedVectorList() const {
1081    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1082  }
1083  bool isVecListOneD() const {
1084    if (!isSingleSpacedVectorList()) return false;
1085    return VectorList.Count == 1;
1086  }
1087
1088  bool isVecListDPair() const {
1089    if (!isSingleSpacedVectorList()) return false;
1090    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1091              .contains(VectorList.RegNum));
1092  }
1093
1094  bool isVecListThreeD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 3;
1097  }
1098
1099  bool isVecListFourD() const {
1100    if (!isSingleSpacedVectorList()) return false;
1101    return VectorList.Count == 4;
1102  }
1103
1104  bool isVecListTwoQ() const {
1105    if (!isDoubleSpacedVectorList()) return false;
1106    return VectorList.Count == 2;
1107  }
1108
1109  bool isVecListThreeQ() const {
1110    if (!isDoubleSpacedVectorList()) return false;
1111    return VectorList.Count == 3;
1112  }
1113
1114  bool isVecListFourQ() const {
1115    if (!isDoubleSpacedVectorList()) return false;
1116    return VectorList.Count == 4;
1117  }
1118
1119  bool isSingleSpacedVectorAllLanes() const {
1120    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1121  }
1122  bool isDoubleSpacedVectorAllLanes() const {
1123    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1124  }
1125  bool isVecListOneDAllLanes() const {
1126    if (!isSingleSpacedVectorAllLanes()) return false;
1127    return VectorList.Count == 1;
1128  }
1129
1130  bool isVecListTwoDAllLanes() const {
1131    if (!isSingleSpacedVectorAllLanes()) return false;
1132    return VectorList.Count == 2;
1133  }
1134
1135  bool isVecListTwoQAllLanes() const {
1136    if (!isDoubleSpacedVectorAllLanes()) return false;
1137    return VectorList.Count == 2;
1138  }
1139
1140  bool isVecListThreeDAllLanes() const {
1141    if (!isSingleSpacedVectorAllLanes()) return false;
1142    return VectorList.Count == 3;
1143  }
1144
1145  bool isVecListThreeQAllLanes() const {
1146    if (!isDoubleSpacedVectorAllLanes()) return false;
1147    return VectorList.Count == 3;
1148  }
1149
1150  bool isVecListFourDAllLanes() const {
1151    if (!isSingleSpacedVectorAllLanes()) return false;
1152    return VectorList.Count == 4;
1153  }
1154
1155  bool isVecListFourQAllLanes() const {
1156    if (!isDoubleSpacedVectorAllLanes()) return false;
1157    return VectorList.Count == 4;
1158  }
1159
1160  bool isSingleSpacedVectorIndexed() const {
1161    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1162  }
1163  bool isDoubleSpacedVectorIndexed() const {
1164    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1165  }
1166  bool isVecListOneDByteIndexed() const {
1167    if (!isSingleSpacedVectorIndexed()) return false;
1168    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1169  }
1170
1171  bool isVecListOneDHWordIndexed() const {
1172    if (!isSingleSpacedVectorIndexed()) return false;
1173    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1174  }
1175
1176  bool isVecListOneDWordIndexed() const {
1177    if (!isSingleSpacedVectorIndexed()) return false;
1178    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1179  }
1180
1181  bool isVecListTwoDByteIndexed() const {
1182    if (!isSingleSpacedVectorIndexed()) return false;
1183    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1184  }
1185
1186  bool isVecListTwoDHWordIndexed() const {
1187    if (!isSingleSpacedVectorIndexed()) return false;
1188    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1189  }
1190
1191  bool isVecListTwoQWordIndexed() const {
1192    if (!isDoubleSpacedVectorIndexed()) return false;
1193    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1194  }
1195
1196  bool isVecListTwoQHWordIndexed() const {
1197    if (!isDoubleSpacedVectorIndexed()) return false;
1198    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1199  }
1200
1201  bool isVecListTwoDWordIndexed() const {
1202    if (!isSingleSpacedVectorIndexed()) return false;
1203    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1204  }
1205
1206  bool isVecListThreeDByteIndexed() const {
1207    if (!isSingleSpacedVectorIndexed()) return false;
1208    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1209  }
1210
1211  bool isVecListThreeDHWordIndexed() const {
1212    if (!isSingleSpacedVectorIndexed()) return false;
1213    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1214  }
1215
1216  bool isVecListThreeQWordIndexed() const {
1217    if (!isDoubleSpacedVectorIndexed()) return false;
1218    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1219  }
1220
1221  bool isVecListThreeQHWordIndexed() const {
1222    if (!isDoubleSpacedVectorIndexed()) return false;
1223    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1224  }
1225
1226  bool isVecListThreeDWordIndexed() const {
1227    if (!isSingleSpacedVectorIndexed()) return false;
1228    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1229  }
1230
1231  bool isVecListFourDByteIndexed() const {
1232    if (!isSingleSpacedVectorIndexed()) return false;
1233    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1234  }
1235
1236  bool isVecListFourDHWordIndexed() const {
1237    if (!isSingleSpacedVectorIndexed()) return false;
1238    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1239  }
1240
1241  bool isVecListFourQWordIndexed() const {
1242    if (!isDoubleSpacedVectorIndexed()) return false;
1243    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1244  }
1245
1246  bool isVecListFourQHWordIndexed() const {
1247    if (!isDoubleSpacedVectorIndexed()) return false;
1248    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1249  }
1250
1251  bool isVecListFourDWordIndexed() const {
1252    if (!isSingleSpacedVectorIndexed()) return false;
1253    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1254  }
1255
1256  bool isVectorIndex8() const {
1257    if (Kind != k_VectorIndex) return false;
1258    return VectorIndex.Val < 8;
1259  }
1260  bool isVectorIndex16() const {
1261    if (Kind != k_VectorIndex) return false;
1262    return VectorIndex.Val < 4;
1263  }
1264  bool isVectorIndex32() const {
1265    if (Kind != k_VectorIndex) return false;
1266    return VectorIndex.Val < 2;
1267  }
1268
1269  bool isNEONi8splat() const {
1270    if (!isImm()) return false;
1271    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1272    // Must be a constant.
1273    if (!CE) return false;
1274    int64_t Value = CE->getValue();
1275    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1276    // value.
1277    return Value >= 0 && Value < 256;
1278  }
1279
1280  bool isNEONi16splat() const {
1281    if (!isImm()) return false;
1282    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1283    // Must be a constant.
1284    if (!CE) return false;
1285    int64_t Value = CE->getValue();
1286    // i16 value in the range [0,255] or [0x0100, 0xff00]
1287    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1288  }
1289
1290  bool isNEONi32splat() const {
1291    if (!isImm()) return false;
1292    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1293    // Must be a constant.
1294    if (!CE) return false;
1295    int64_t Value = CE->getValue();
1296    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1297    return (Value >= 0 && Value < 256) ||
1298      (Value >= 0x0100 && Value <= 0xff00) ||
1299      (Value >= 0x010000 && Value <= 0xff0000) ||
1300      (Value >= 0x01000000 && Value <= 0xff000000);
1301  }
1302
1303  bool isNEONi32vmov() const {
1304    if (!isImm()) return false;
1305    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1306    // Must be a constant.
1307    if (!CE) return false;
1308    int64_t Value = CE->getValue();
1309    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1310    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1311    return (Value >= 0 && Value < 256) ||
1312      (Value >= 0x0100 && Value <= 0xff00) ||
1313      (Value >= 0x010000 && Value <= 0xff0000) ||
1314      (Value >= 0x01000000 && Value <= 0xff000000) ||
1315      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1316      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1317  }
1318  bool isNEONi32vmovNeg() const {
1319    if (!isImm()) return false;
1320    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1321    // Must be a constant.
1322    if (!CE) return false;
1323    int64_t Value = ~CE->getValue();
1324    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1325    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1326    return (Value >= 0 && Value < 256) ||
1327      (Value >= 0x0100 && Value <= 0xff00) ||
1328      (Value >= 0x010000 && Value <= 0xff0000) ||
1329      (Value >= 0x01000000 && Value <= 0xff000000) ||
1330      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1331      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1332  }
1333
1334  bool isNEONi64splat() const {
1335    if (!isImm()) return false;
1336    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1337    // Must be a constant.
1338    if (!CE) return false;
1339    uint64_t Value = CE->getValue();
1340    // i64 value with each byte being either 0 or 0xff.
1341    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1342      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1343    return true;
1344  }
1345
1346  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1347    // Add as immediates when possible.  Null MCExpr = 0.
1348    if (Expr == 0)
1349      Inst.addOperand(MCOperand::CreateImm(0));
1350    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1351      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1352    else
1353      Inst.addOperand(MCOperand::CreateExpr(Expr));
1354  }
1355
1356  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1357    assert(N == 2 && "Invalid number of operands!");
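    // The predicate is two operands: the condition-code immediate plus the
    // predicate register (CPSR when conditional, 0 for AL).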
1358    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1359    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1360    Inst.addOperand(MCOperand::CreateReg(RegNum));
1361  }
1362
1363  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1364    assert(N == 1 && "Invalid number of operands!");
1365    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1366  }
1367
1368  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1369    assert(N == 1 && "Invalid number of operands!");
1370    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1371  }
1372
1373  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1374    assert(N == 1 && "Invalid number of operands!");
1375    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1376  }
1377
1378  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1379    assert(N == 1 && "Invalid number of operands!");
1380    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1381  }
1382
1383  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1384    assert(N == 1 && "Invalid number of operands!");
1385    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1386  }
1387
1388  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1389    assert(N == 1 && "Invalid number of operands!");
1390    Inst.addOperand(MCOperand::CreateReg(getReg()));
1391  }
1392
1393  void addRegOperands(MCInst &Inst, unsigned N) const {
1394    assert(N == 1 && "Invalid number of operands!");
1395    Inst.addOperand(MCOperand::CreateReg(getReg()));
1396  }
1397
1398  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1399    assert(N == 3 && "Invalid number of operands!");
1400    assert(isRegShiftedReg() &&
1401           "addRegShiftedRegOperands() on non RegShiftedReg!");
1402    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1403    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
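    // getSORegOpc packs the shift type and shift amount into the single
    // immediate operand added below.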
1404    Inst.addOperand(MCOperand::CreateImm(
1405      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1406  }
1407
1408  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1409    assert(N == 2 && "Invalid number of operands!");
1410    assert(isRegShiftedImm() &&
1411           "addRegShiftedImmOperands() on non RegShiftedImm!");
1412    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1413    Inst.addOperand(MCOperand::CreateImm(
1414      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1415  }
1416
1417  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1418    assert(N == 1 && "Invalid number of operands!");
1419    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1420                                         ShifterImm.Imm));
1421  }
1422
1423  void addRegListOperands(MCInst &Inst, unsigned N) const {
1424    assert(N == 1 && "Invalid number of operands!");
1425    const SmallVectorImpl<unsigned> &RegList = getRegList();
1426    for (SmallVectorImpl<unsigned>::const_iterator
1427           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1428      Inst.addOperand(MCOperand::CreateReg(*I));
1429  }
1430
1431  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1432    addRegListOperands(Inst, N);
1433  }
1434
1435  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1436    addRegListOperands(Inst, N);
1437  }
1438
1439  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1440    assert(N == 1 && "Invalid number of operands!");
1441    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1442    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1443  }
1444
1445  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1446    assert(N == 1 && "Invalid number of operands!");
1447    // Munge the lsb/width into a bitfield mask.
1448    unsigned lsb = Bitfield.LSB;
1449    unsigned width = Bitfield.Width;
1450    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1451    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1452                      (32 - (lsb + width)));
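    // e.g. lsb = 8, width = 8 gives Mask = 0xffff00ff (bits [15:8] clear).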
1453    Inst.addOperand(MCOperand::CreateImm(Mask));
1454  }
1455
1456  void addImmOperands(MCInst &Inst, unsigned N) const {
1457    assert(N == 1 && "Invalid number of operands!");
1458    addExpr(Inst, getImm());
1459  }
1460
1461  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1462    assert(N == 1 && "Invalid number of operands!");
1463    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1464    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1465  }
1466
1467  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1468    assert(N == 1 && "Invalid number of operands!");
1469    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1470    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1471  }
1472
1473  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1474    assert(N == 1 && "Invalid number of operands!");
1475    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1476    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1477    Inst.addOperand(MCOperand::CreateImm(Val));
1478  }
1479
1480  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1481    assert(N == 1 && "Invalid number of operands!");
1482    // FIXME: We really want to scale the value here, but the LDRD/STRD
1483    // instructions don't encode operands that way yet.
1484    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1485    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1486  }
1487
1488  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1489    assert(N == 1 && "Invalid number of operands!");
1490    // The immediate is scaled by four in the encoding and is stored
1491    // in the MCInst as such. Lop off the low two bits here.
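    // e.g., #1020 in the source is stored here as 255.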
1492    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1493    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1494  }
1495
1496  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1497    assert(N == 1 && "Invalid number of operands!");
1498    // The immediate is scaled by four in the encoding and is stored
1499    // in the MCInst as such. Lop off the low two bits here.
1500    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1501    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1502  }
1503
1504  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1505    assert(N == 1 && "Invalid number of operands!");
1506    // The constant encodes as the immediate-1, and we store in the instruction
1507    // the bits as encoded, so subtract off one here.
1508    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1509    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1510  }
1511
1512  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1513    assert(N == 1 && "Invalid number of operands!");
1514    // The constant encodes as the immediate-1, and we store in the instruction
1515    // the bits as encoded, so subtract off one here.
1516    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1517    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1518  }
1519
1520  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1521    assert(N == 1 && "Invalid number of operands!");
1522    // The constant encodes as the immediate, except for 32, which encodes as
1523    // zero.
1524    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1525    unsigned Imm = CE->getValue();
1526    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1527  }
1528
1529  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1530    assert(N == 1 && "Invalid number of operands!");
1531    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1532    // the instruction as well.
1533    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1534    int Val = CE->getValue();
1535    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1536  }
1537
1538  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1539    assert(N == 1 && "Invalid number of operands!");
1540    // The operand is actually a t2_so_imm, but we have its bitwise
1541    // negation in the assembly source, so twiddle it here.
1542    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1543    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1544  }
1545
1546  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1547    assert(N == 1 && "Invalid number of operands!");
1548    // The operand is actually a t2_so_imm, but we have its
1549    // negation in the assembly source, so twiddle it here.
1550    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1551    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1552  }
1553
1554  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1555    assert(N == 1 && "Invalid number of operands!");
1556    // The operand is actually a so_imm, but we have its bitwise
1557    // negation in the assembly source, so twiddle it here.
1558    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1559    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1560  }
1561
1562  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1563    assert(N == 1 && "Invalid number of operands!");
1564    // The operand is actually a so_imm, but we have its
1565    // negation in the assembly source, so twiddle it here.
1566    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1567    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1568  }
1569
1570  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1571    assert(N == 1 && "Invalid number of operands!");
1572    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1573  }
1574
1575  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1576    assert(N == 1 && "Invalid number of operands!");
1577    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1578  }
1579
1580  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1581    assert(N == 1 && "Invalid number of operands!");
1582    int32_t Imm = Memory.OffsetImm->getValue();
1583    // FIXME: Handle #-0
1584    if (Imm == INT32_MIN) Imm = 0;
1585    Inst.addOperand(MCOperand::CreateImm(Imm));
1586  }
1587
1588  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1589    assert(N == 2 && "Invalid number of operands!");
1590    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1591    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1592  }
1593
1594  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1595    assert(N == 3 && "Invalid number of operands!");
1596    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1597    if (!Memory.OffsetRegNum) {
1598      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1599      // Special case for #-0
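      // (the parser represents #-0 as INT32_MIN, so AddSub above has already
      // been set to 'sub'; only the magnitude needs clearing).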
1600      if (Val == INT32_MIN) Val = 0;
1601      if (Val < 0) Val = -Val;
1602      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1603    } else {
1604      // For register offset, we encode the shift type and negation flag
1605      // here.
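      // e.g., [r0, -r1, lsl #2] packs 'sub', shift amount 2, and 'lsl' into Val.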
1606      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1607                              Memory.ShiftImm, Memory.ShiftType);
1608    }
1609    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1610    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1611    Inst.addOperand(MCOperand::CreateImm(Val));
1612  }
1613
1614  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1615    assert(N == 2 && "Invalid number of operands!");
1616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1617    assert(CE && "non-constant AM2OffsetImm operand!");
1618    int32_t Val = CE->getValue();
1619    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1620    // Special case for #-0
1621    if (Val == INT32_MIN) Val = 0;
1622    if (Val < 0) Val = -Val;
1623    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1624    Inst.addOperand(MCOperand::CreateReg(0));
1625    Inst.addOperand(MCOperand::CreateImm(Val));
1626  }
1627
1628  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1629    assert(N == 3 && "Invalid number of operands!");
1630    // If we have an immediate that's not a constant, treat it as a label
1631    // reference needing a fixup. If it is a constant, it's something else
1632    // and we reject it.
1633    if (isImm()) {
1634      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1635      Inst.addOperand(MCOperand::CreateReg(0));
1636      Inst.addOperand(MCOperand::CreateImm(0));
1637      return;
1638    }
1639
1640    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1641    if (!Memory.OffsetRegNum) {
1642      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1643      // Special case for #-0
1644      if (Val == INT32_MIN) Val = 0;
1645      if (Val < 0) Val = -Val;
1646      Val = ARM_AM::getAM3Opc(AddSub, Val);
1647    } else {
1648      // For register offset, we encode the shift type and negation flag
1649      // here.
1650      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1651    }
1652    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1653    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1654    Inst.addOperand(MCOperand::CreateImm(Val));
1655  }
1656
1657  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1658    assert(N == 2 && "Invalid number of operands!");
1659    if (Kind == k_PostIndexRegister) {
1660      int32_t Val =
1661        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1662      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1663      Inst.addOperand(MCOperand::CreateImm(Val));
1664      return;
1665    }
1666
1667    // Constant offset.
1668    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1669    int32_t Val = CE->getValue();
1670    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1671    // Special case for #-0
1672    if (Val == INT32_MIN) Val = 0;
1673    if (Val < 0) Val = -Val;
1674    Val = ARM_AM::getAM3Opc(AddSub, Val);
1675    Inst.addOperand(MCOperand::CreateReg(0));
1676    Inst.addOperand(MCOperand::CreateImm(Val));
1677  }
1678
1679  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1680    assert(N == 2 && "Invalid number of operands!");
1681    // If we have an immediate that's not a constant, treat it as a label
1682    // reference needing a fixup. If it is a constant, it's something else
1683    // and we reject it.
1684    if (isImm()) {
1685      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1686      Inst.addOperand(MCOperand::CreateImm(0));
1687      return;
1688    }
1689
1690    // The lower two bits are always zero and as such are not encoded.
1691    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1692    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1693    // Special case for #-0
1694    if (Val == INT32_MIN) Val = 0;
1695    if (Val < 0) Val = -Val;
1696    Val = ARM_AM::getAM5Opc(AddSub, Val);
1697    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1698    Inst.addOperand(MCOperand::CreateImm(Val));
1699  }
1700
1701  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1702    assert(N == 2 && "Invalid number of operands!");
1703    // If we have an immediate that's not a constant, treat it as a label
1704    // reference needing a fixup. If it is a constant, it's something else
1705    // and we reject it.
1706    if (isImm()) {
1707      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1708      Inst.addOperand(MCOperand::CreateImm(0));
1709      return;
1710    }
1711
1712    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1713    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1714    Inst.addOperand(MCOperand::CreateImm(Val));
1715  }
1716
1717  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1718    assert(N == 2 && "Invalid number of operands!");
1719    // The lower two bits are always zero and as such are not encoded.
1720    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1721    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1722    Inst.addOperand(MCOperand::CreateImm(Val));
1723  }
1724
1725  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1726    assert(N == 2 && "Invalid number of operands!");
1727    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1728    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1729    Inst.addOperand(MCOperand::CreateImm(Val));
1730  }
1731
1732  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1733    addMemImm8OffsetOperands(Inst, N);
1734  }
1735
1736  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1737    addMemImm8OffsetOperands(Inst, N);
1738  }
1739
1740  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1741    assert(N == 2 && "Invalid number of operands!");
1742    // If this is an immediate, it's a label reference.
1743    if (isImm()) {
1744      addExpr(Inst, getImm());
1745      Inst.addOperand(MCOperand::CreateImm(0));
1746      return;
1747    }
1748
1749    // Otherwise, it's a normal memory reg+offset.
1750    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1751    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1752    Inst.addOperand(MCOperand::CreateImm(Val));
1753  }
1754
1755  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1756    assert(N == 2 && "Invalid number of operands!");
1757    // If this is an immediate, it's a label reference.
1758    if (isImm()) {
1759      addExpr(Inst, getImm());
1760      Inst.addOperand(MCOperand::CreateImm(0));
1761      return;
1762    }
1763
1764    // Otherwise, it's a normal memory reg+offset.
1765    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1766    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1767    Inst.addOperand(MCOperand::CreateImm(Val));
1768  }
1769
1770  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1771    assert(N == 2 && "Invalid number of operands!");
1772    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1773    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1774  }
1775
1776  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1777    assert(N == 2 && "Invalid number of operands!");
1778    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1779    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1780  }
1781
1782  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1783    assert(N == 3 && "Invalid number of operands!");
1784    unsigned Val =
1785      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1786                        Memory.ShiftImm, Memory.ShiftType);
1787    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1788    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1789    Inst.addOperand(MCOperand::CreateImm(Val));
1790  }
1791
1792  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1793    assert(N == 3 && "Invalid number of operands!");
1794    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1795    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1796    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1797  }
1798
1799  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1800    assert(N == 2 && "Invalid number of operands!");
1801    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1802    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1803  }
1804
1805  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1806    assert(N == 2 && "Invalid number of operands!");
1807    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1808    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1809    Inst.addOperand(MCOperand::CreateImm(Val));
1810  }
1811
1812  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1813    assert(N == 2 && "Invalid number of operands!");
1814    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1815    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1816    Inst.addOperand(MCOperand::CreateImm(Val));
1817  }
1818
1819  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1820    assert(N == 2 && "Invalid number of operands!");
1821    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1822    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1823    Inst.addOperand(MCOperand::CreateImm(Val));
1824  }
1825
1826  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1827    assert(N == 2 && "Invalid number of operands!");
1828    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1829    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1830    Inst.addOperand(MCOperand::CreateImm(Val));
1831  }
1832
1833  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1834    assert(N == 1 && "Invalid number of operands!");
1835    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1836    assert(CE && "non-constant post-idx-imm8 operand!");
1837    int Imm = CE->getValue();
1838    bool isAdd = Imm >= 0;
1839    if (Imm == INT32_MIN) Imm = 0;
1840    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
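    // e.g., #4 becomes 0x104 (bit 8 set for add) and #-4 becomes 0x004.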
1841    Inst.addOperand(MCOperand::CreateImm(Imm));
1842  }
1843
1844  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1845    assert(N == 1 && "Invalid number of operands!");
1846    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1847    assert(CE && "non-constant post-idx-imm8s4 operand!");
1848    int Imm = CE->getValue();
1849    bool isAdd = Imm >= 0;
1850    if (Imm == INT32_MIN) Imm = 0;
1851    // Immediate is scaled by 4.
1852    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1853    Inst.addOperand(MCOperand::CreateImm(Imm));
1854  }
1855
1856  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1857    assert(N == 2 && "Invalid number of operands!");
1858    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1859    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1860  }
1861
1862  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1863    assert(N == 2 && "Invalid number of operands!");
1864    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1865    // The sign, shift type, and shift amount are encoded in a single operand
1866    // using the AM2 encoding helpers.
1867    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1868    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1869                                     PostIdxReg.ShiftTy);
1870    Inst.addOperand(MCOperand::CreateImm(Imm));
1871  }
1872
1873  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1874    assert(N == 1 && "Invalid number of operands!");
1875    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1876  }
1877
1878  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1879    assert(N == 1 && "Invalid number of operands!");
1880    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1881  }
1882
1883  void addVecListOperands(MCInst &Inst, unsigned N) const {
1884    assert(N == 1 && "Invalid number of operands!");
1885    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1886  }
1887
1888  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1889    assert(N == 2 && "Invalid number of operands!");
1890    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1891    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1892  }
1893
1894  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1895    assert(N == 1 && "Invalid number of operands!");
1896    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1897  }
1898
1899  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1900    assert(N == 1 && "Invalid number of operands!");
1901    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1902  }
1903
1904  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1905    assert(N == 1 && "Invalid number of operands!");
1906    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1907  }
1908
1909  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1910    assert(N == 1 && "Invalid number of operands!");
1911    // The immediate encodes the type of constant as well as the value.
1912    // Mask in that this is an i8 splat.
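    // The op/cmode selector bits sit above the low 8 bits that hold the value.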
1913    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1914    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1915  }
1916
1917  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1918    assert(N == 1 && "Invalid number of operands!");
1919    // The immediate encodes the type of constant as well as the value.
1920    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1921    unsigned Value = CE->getValue();
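    // A value that doesn't fit in the low byte is shifted down by 8 and tagged
    // with a different op/cmode pattern than a low-byte value.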
1922    if (Value >= 256)
1923      Value = (Value >> 8) | 0xa00;
1924    else
1925      Value |= 0x800;
1926    Inst.addOperand(MCOperand::CreateImm(Value));
1927  }
1928
1929  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1930    assert(N == 1 && "Invalid number of operands!");
1931    // The immediate encodes the type of constant as well as the value.
1932    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1933    unsigned Value = CE->getValue();
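    // Shift the payload byte down to bits [7:0]; how far it was shifted is
    // recorded in the op/cmode bits (0x200, 0x400 or 0x600).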
1934    if (Value >= 256 && Value <= 0xff00)
1935      Value = (Value >> 8) | 0x200;
1936    else if (Value > 0xffff && Value <= 0xff0000)
1937      Value = (Value >> 16) | 0x400;
1938    else if (Value > 0xffffff)
1939      Value = (Value >> 24) | 0x600;
1940    Inst.addOperand(MCOperand::CreateImm(Value));
1941  }
1942
1943  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1944    assert(N == 1 && "Invalid number of operands!");
1945    // The immediate encodes the type of constant as well as the value.
1946    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1947    unsigned Value = CE->getValue();
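    // Like the i32 splat form, but a nonzero low byte selects the 0xc00/0xd00
    // op/cmode encodings instead of the plain shifted 0x200/0x400 forms.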
1948    if (Value >= 256 && Value <= 0xffff)
1949      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1950    else if (Value > 0xffff && Value <= 0xffffff)
1951      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1952    else if (Value > 0xffffff)
1953      Value = (Value >> 24) | 0x600;
1954    Inst.addOperand(MCOperand::CreateImm(Value));
1955  }
1956
1957  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1958    assert(N == 1 && "Invalid number of operands!");
1959    // The immediate encodes the type of constant as well as the value.
1960    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1961    unsigned Value = ~CE->getValue();
1962    if (Value >= 256 && Value <= 0xffff)
1963      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1964    else if (Value > 0xffff && Value <= 0xffffff)
1965      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1966    else if (Value > 0xffffff)
1967      Value = (Value >> 24) | 0x600;
1968    Inst.addOperand(MCOperand::CreateImm(Value));
1969  }
1970
1971  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1972    assert(N == 1 && "Invalid number of operands!");
1973    // The immediate encodes the type of constant as well as the value.
1974    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1975    uint64_t Value = CE->getValue();
1976    unsigned Imm = 0;
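    // Each byte of the value is assumed to be either 0x00 or 0xff; collapse
    // each byte down to a single bit of the 8-bit immediate.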
1977    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1978      Imm |= (Value & 1) << i;
1979    }
1980    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1981  }
1982
1983  virtual void print(raw_ostream &OS) const;
1984
1985  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1986    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1987    Op->ITMask.Mask = Mask;
1988    Op->StartLoc = S;
1989    Op->EndLoc = S;
1990    return Op;
1991  }
1992
1993  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1994    ARMOperand *Op = new ARMOperand(k_CondCode);
1995    Op->CC.Val = CC;
1996    Op->StartLoc = S;
1997    Op->EndLoc = S;
1998    return Op;
1999  }
2000
2001  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2002    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2003    Op->Cop.Val = CopVal;
2004    Op->StartLoc = S;
2005    Op->EndLoc = S;
2006    return Op;
2007  }
2008
2009  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2010    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2011    Op->Cop.Val = CopVal;
2012    Op->StartLoc = S;
2013    Op->EndLoc = S;
2014    return Op;
2015  }
2016
2017  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2018    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2019    Op->Cop.Val = Val;
2020    Op->StartLoc = S;
2021    Op->EndLoc = E;
2022    return Op;
2023  }
2024
2025  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2026    ARMOperand *Op = new ARMOperand(k_CCOut);
2027    Op->Reg.RegNum = RegNum;
2028    Op->StartLoc = S;
2029    Op->EndLoc = S;
2030    return Op;
2031  }
2032
2033  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2034    ARMOperand *Op = new ARMOperand(k_Token);
2035    Op->Tok.Data = Str.data();
2036    Op->Tok.Length = Str.size();
2037    Op->StartLoc = S;
2038    Op->EndLoc = S;
2039    return Op;
2040  }
2041
2042  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2043    ARMOperand *Op = new ARMOperand(k_Register);
2044    Op->Reg.RegNum = RegNum;
2045    Op->StartLoc = S;
2046    Op->EndLoc = E;
2047    return Op;
2048  }
2049
2050  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2051                                           unsigned SrcReg,
2052                                           unsigned ShiftReg,
2053                                           unsigned ShiftImm,
2054                                           SMLoc S, SMLoc E) {
2055    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2056    Op->RegShiftedReg.ShiftTy = ShTy;
2057    Op->RegShiftedReg.SrcReg = SrcReg;
2058    Op->RegShiftedReg.ShiftReg = ShiftReg;
2059    Op->RegShiftedReg.ShiftImm = ShiftImm;
2060    Op->StartLoc = S;
2061    Op->EndLoc = E;
2062    return Op;
2063  }
2064
2065  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2066                                            unsigned SrcReg,
2067                                            unsigned ShiftImm,
2068                                            SMLoc S, SMLoc E) {
2069    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2070    Op->RegShiftedImm.ShiftTy = ShTy;
2071    Op->RegShiftedImm.SrcReg = SrcReg;
2072    Op->RegShiftedImm.ShiftImm = ShiftImm;
2073    Op->StartLoc = S;
2074    Op->EndLoc = E;
2075    return Op;
2076  }
2077
2078  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2079                                   SMLoc S, SMLoc E) {
2080    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2081    Op->ShifterImm.isASR = isASR;
2082    Op->ShifterImm.Imm = Imm;
2083    Op->StartLoc = S;
2084    Op->EndLoc = E;
2085    return Op;
2086  }
2087
2088  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2089    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2090    Op->RotImm.Imm = Imm;
2091    Op->StartLoc = S;
2092    Op->EndLoc = E;
2093    return Op;
2094  }
2095
2096  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2097                                    SMLoc S, SMLoc E) {
2098    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2099    Op->Bitfield.LSB = LSB;
2100    Op->Bitfield.Width = Width;
2101    Op->StartLoc = S;
2102    Op->EndLoc = E;
2103    return Op;
2104  }
2105
2106  static ARMOperand *
2107  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2108                SMLoc StartLoc, SMLoc EndLoc) {
2109    KindTy Kind = k_RegisterList;
2110
2111    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2112      Kind = k_DPRRegisterList;
2113    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2114             contains(Regs.front().first))
2115      Kind = k_SPRRegisterList;
2116
2117    ARMOperand *Op = new ARMOperand(Kind);
2118    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2119           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2120      Op->Registers.push_back(I->first);
2121    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2122    Op->StartLoc = StartLoc;
2123    Op->EndLoc = EndLoc;
2124    return Op;
2125  }
2126
2127  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2128                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2129    ARMOperand *Op = new ARMOperand(k_VectorList);
2130    Op->VectorList.RegNum = RegNum;
2131    Op->VectorList.Count = Count;
2132    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2133    Op->StartLoc = S;
2134    Op->EndLoc = E;
2135    return Op;
2136  }
2137
2138  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2139                                              bool isDoubleSpaced,
2140                                              SMLoc S, SMLoc E) {
2141    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2142    Op->VectorList.RegNum = RegNum;
2143    Op->VectorList.Count = Count;
2144    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2145    Op->StartLoc = S;
2146    Op->EndLoc = E;
2147    return Op;
2148  }
2149
2150  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2151                                             unsigned Index,
2152                                             bool isDoubleSpaced,
2153                                             SMLoc S, SMLoc E) {
2154    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2155    Op->VectorList.RegNum = RegNum;
2156    Op->VectorList.Count = Count;
2157    Op->VectorList.LaneIndex = Index;
2158    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2159    Op->StartLoc = S;
2160    Op->EndLoc = E;
2161    return Op;
2162  }
2163
2164  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2165                                       MCContext &Ctx) {
2166    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2167    Op->VectorIndex.Val = Idx;
2168    Op->StartLoc = S;
2169    Op->EndLoc = E;
2170    return Op;
2171  }
2172
2173  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2174    ARMOperand *Op = new ARMOperand(k_Immediate);
2175    Op->Imm.Val = Val;
2176    Op->StartLoc = S;
2177    Op->EndLoc = E;
2178    return Op;
2179  }
2180
2181  static ARMOperand *CreateMem(unsigned BaseRegNum,
2182                               const MCConstantExpr *OffsetImm,
2183                               unsigned OffsetRegNum,
2184                               ARM_AM::ShiftOpc ShiftType,
2185                               unsigned ShiftImm,
2186                               unsigned Alignment,
2187                               bool isNegative,
2188                               SMLoc S, SMLoc E) {
2189    ARMOperand *Op = new ARMOperand(k_Memory);
2190    Op->Memory.BaseRegNum = BaseRegNum;
2191    Op->Memory.OffsetImm = OffsetImm;
2192    Op->Memory.OffsetRegNum = OffsetRegNum;
2193    Op->Memory.ShiftType = ShiftType;
2194    Op->Memory.ShiftImm = ShiftImm;
2195    Op->Memory.Alignment = Alignment;
2196    Op->Memory.isNegative = isNegative;
2197    Op->StartLoc = S;
2198    Op->EndLoc = E;
2199    return Op;
2200  }
2201
2202  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2203                                      ARM_AM::ShiftOpc ShiftTy,
2204                                      unsigned ShiftImm,
2205                                      SMLoc S, SMLoc E) {
2206    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2207    Op->PostIdxReg.RegNum = RegNum;
2208    Op->PostIdxReg.isAdd = isAdd;
2209    Op->PostIdxReg.ShiftTy = ShiftTy;
2210    Op->PostIdxReg.ShiftImm = ShiftImm;
2211    Op->StartLoc = S;
2212    Op->EndLoc = E;
2213    return Op;
2214  }
2215
2216  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2217    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2218    Op->MBOpt.Val = Opt;
2219    Op->StartLoc = S;
2220    Op->EndLoc = S;
2221    return Op;
2222  }
2223
2224  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2225    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2226    Op->IFlags.Val = IFlags;
2227    Op->StartLoc = S;
2228    Op->EndLoc = S;
2229    return Op;
2230  }
2231
2232  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2233    ARMOperand *Op = new ARMOperand(k_MSRMask);
2234    Op->MMask.Val = MMask;
2235    Op->StartLoc = S;
2236    Op->EndLoc = S;
2237    return Op;
2238  }
2239};
2240
2241} // end anonymous namespace.
2242
2243void ARMOperand::print(raw_ostream &OS) const {
2244  switch (Kind) {
2245  case k_CondCode:
2246    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2247    break;
2248  case k_CCOut:
2249    OS << "<ccout " << getReg() << ">";
2250    break;
2251  case k_ITCondMask: {
2252    static const char *MaskStr[] = {
2253      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2254      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2255    };
2256    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2257    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2258    break;
2259  }
2260  case k_CoprocNum:
2261    OS << "<coprocessor number: " << getCoproc() << ">";
2262    break;
2263  case k_CoprocReg:
2264    OS << "<coprocessor register: " << getCoproc() << ">";
2265    break;
2266  case k_CoprocOption:
2267    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2268    break;
2269  case k_MSRMask:
2270    OS << "<mask: " << getMSRMask() << ">";
2271    break;
2272  case k_Immediate:
2273    getImm()->print(OS);
2274    break;
2275  case k_MemBarrierOpt:
2276    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2277    break;
2278  case k_Memory:
2279    OS << "<memory "
2280       << " base:" << Memory.BaseRegNum;
2281    OS << ">";
2282    break;
2283  case k_PostIndexRegister:
2284    OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2285       << PostIdxReg.RegNum;
2286    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2287      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2288         << PostIdxReg.ShiftImm;
2289    OS << ">";
2290    break;
2291  case k_ProcIFlags: {
2292    OS << "<ARM_PROC::";
2293    unsigned IFlags = getProcIFlags();
2294    for (int i=2; i >= 0; --i)
2295      if (IFlags & (1 << i))
2296        OS << ARM_PROC::IFlagsToString(1 << i);
2297    OS << ">";
2298    break;
2299  }
2300  case k_Register:
2301    OS << "<register " << getReg() << ">";
2302    break;
2303  case k_ShifterImmediate:
2304    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2305       << " #" << ShifterImm.Imm << ">";
2306    break;
2307  case k_ShiftedRegister:
2308    OS << "<so_reg_reg "
2309       << RegShiftedReg.SrcReg << " "
2310       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2311       << " " << RegShiftedReg.ShiftReg << ">";
2312    break;
2313  case k_ShiftedImmediate:
2314    OS << "<so_reg_imm "
2315       << RegShiftedImm.SrcReg << " "
2316       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2317       << " #" << RegShiftedImm.ShiftImm << ">";
2318    break;
2319  case k_RotateImmediate:
2320    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2321    break;
2322  case k_BitfieldDescriptor:
2323    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2324       << ", width: " << Bitfield.Width << ">";
2325    break;
2326  case k_RegisterList:
2327  case k_DPRRegisterList:
2328  case k_SPRRegisterList: {
2329    OS << "<register_list ";
2330
2331    const SmallVectorImpl<unsigned> &RegList = getRegList();
2332    for (SmallVectorImpl<unsigned>::const_iterator
2333           I = RegList.begin(), E = RegList.end(); I != E; ) {
2334      OS << *I;
2335      if (++I < E) OS << ", ";
2336    }
2337
2338    OS << ">";
2339    break;
2340  }
2341  case k_VectorList:
2342    OS << "<vector_list " << VectorList.Count << " * "
2343       << VectorList.RegNum << ">";
2344    break;
2345  case k_VectorListAllLanes:
2346    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2347       << VectorList.RegNum << ">";
2348    break;
2349  case k_VectorListIndexed:
2350    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2351       << VectorList.Count << " * " << VectorList.RegNum << ">";
2352    break;
2353  case k_Token:
2354    OS << "'" << getToken() << "'";
2355    break;
2356  case k_VectorIndex:
2357    OS << "<vectorindex " << getVectorIndex() << ">";
2358    break;
2359  }
2360}
2361
2362/// @name Auto-generated Match Functions
2363/// {
2364
2365static unsigned MatchRegisterName(StringRef Name);
2366
2367/// }
2368
2369bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2370                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2371  StartLoc = Parser.getTok().getLoc();
2372  RegNo = tryParseRegister();
2373  EndLoc = Parser.getTok().getLoc();
2374
2375  return (RegNo == (unsigned)-1);
2376}
2377
2378/// Try to parse a register name.  The token must be an Identifier when called,
2379/// and if it is a register name the token is eaten and the register number is
2380/// returned.  Otherwise return -1.
2381///
2382int ARMAsmParser::tryParseRegister() {
2383  const AsmToken &Tok = Parser.getTok();
2384  if (Tok.isNot(AsmToken::Identifier)) return -1;
2385
2386  std::string lowerCase = Tok.getString().lower();
2387  unsigned RegNum = MatchRegisterName(lowerCase);
2388  if (!RegNum) {
2389    RegNum = StringSwitch<unsigned>(lowerCase)
2390      .Case("r13", ARM::SP)
2391      .Case("r14", ARM::LR)
2392      .Case("r15", ARM::PC)
2393      .Case("ip", ARM::R12)
2394      // Additional register name aliases for 'gas' compatibility.
2395      .Case("a1", ARM::R0)
2396      .Case("a2", ARM::R1)
2397      .Case("a3", ARM::R2)
2398      .Case("a4", ARM::R3)
2399      .Case("v1", ARM::R4)
2400      .Case("v2", ARM::R5)
2401      .Case("v3", ARM::R6)
2402      .Case("v4", ARM::R7)
2403      .Case("v5", ARM::R8)
2404      .Case("v6", ARM::R9)
2405      .Case("v7", ARM::R10)
2406      .Case("v8", ARM::R11)
2407      .Case("sb", ARM::R9)
2408      .Case("sl", ARM::R10)
2409      .Case("fp", ARM::R11)
2410      .Default(0);
2411  }
2412  if (!RegNum) {
2413    // Check for aliases registered via .req. Canonicalize to lower case.
2414    // That's more consistent since register names are case insensitive, and
2415    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2416    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2417    // If no match, return failure.
2418    if (Entry == RegisterReqs.end())
2419      return -1;
2420    Parser.Lex(); // Eat identifier token.
2421    return Entry->getValue();
2422  }
2423
2424  Parser.Lex(); // Eat identifier token.
2425
2426  return RegNum;
2427}
2428
2429// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2430// If a recoverable error occurs, return 1. If an irrecoverable error
2431// occurs, return -1. An irrecoverable error is one where tokens have been
2432// consumed in the process of trying to parse the shifter (i.e., when it is
2433// indeed a shifter operand, but malformed).
2434int ARMAsmParser::tryParseShiftRegister(
2435                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2436  SMLoc S = Parser.getTok().getLoc();
2437  const AsmToken &Tok = Parser.getTok();
2438  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2439
2440  std::string lowerCase = Tok.getString().lower();
2441  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2442      .Case("asl", ARM_AM::lsl)
2443      .Case("lsl", ARM_AM::lsl)
2444      .Case("lsr", ARM_AM::lsr)
2445      .Case("asr", ARM_AM::asr)
2446      .Case("ror", ARM_AM::ror)
2447      .Case("rrx", ARM_AM::rrx)
2448      .Default(ARM_AM::no_shift);
2449
2450  if (ShiftTy == ARM_AM::no_shift)
2451    return 1;
2452
2453  Parser.Lex(); // Eat the operator.
2454
2455  // The source register for the shift has already been added to the
2456  // operand list, so we need to pop it off and combine it into the shifted
2457  // register operand instead.
2458  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2459  if (!PrevOp->isReg())
2460    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2461  int SrcReg = PrevOp->getReg();
2462  int64_t Imm = 0;
2463  int ShiftReg = 0;
2464  if (ShiftTy == ARM_AM::rrx) {
2465    // RRX doesn't have an explicit shift amount. The encoder expects
2466    // the shift register to be the same as the source register. Seems odd,
2467    // but OK.
2468    ShiftReg = SrcReg;
2469  } else {
2470    // Figure out if this is shifted by a constant or a register (for non-RRX).
2471    if (Parser.getTok().is(AsmToken::Hash) ||
2472        Parser.getTok().is(AsmToken::Dollar)) {
2473      Parser.Lex(); // Eat hash.
2474      SMLoc ImmLoc = Parser.getTok().getLoc();
2475      const MCExpr *ShiftExpr = 0;
2476      if (getParser().ParseExpression(ShiftExpr)) {
2477        Error(ImmLoc, "invalid immediate shift value");
2478        return -1;
2479      }
2480      // The expression must be evaluatable as an immediate.
2481      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2482      if (!CE) {
2483        Error(ImmLoc, "invalid immediate shift value");
2484        return -1;
2485      }
2486      // Range check the immediate.
2487      // lsl, ror: 0 <= imm <= 31
2488      // lsr, asr: 0 <= imm <= 32
2489      Imm = CE->getValue();
2490      if (Imm < 0 ||
2491          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2492          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2493        Error(ImmLoc, "immediate shift value out of range");
2494        return -1;
2495      }
2496      // shift by zero is a nop. Always send it through as lsl.
2497      // ('as' compatibility)
2498      if (Imm == 0)
2499        ShiftTy = ARM_AM::lsl;
2500    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2501      ShiftReg = tryParseRegister();
2502      SMLoc L = Parser.getTok().getLoc();
2503      if (ShiftReg == -1) {
2504        Error(L, "expected immediate or register in shift operand");
2505        return -1;
2506      }
2507    } else {
2508      Error(Parser.getTok().getLoc(),
2509            "expected immediate or register in shift operand");
2510      return -1;
2511    }
2512  }
2513
2514  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2515    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2516                                                         ShiftReg, Imm,
2517                                               S, Parser.getTok().getLoc()));
2518  else
2519    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2520                                               S, Parser.getTok().getLoc()));
2521
2522  return 0;
2523}
2524
2525
2526/// Try to parse a register name.  The token must be an Identifier when called.
2527/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2528/// if there is a "writeback" ('!'). Returns 'true' if it's not a register.
2529///
2530/// TODO this is likely to change to allow different register types and/or to
2531/// parse for a specific register type.
2532bool ARMAsmParser::
2533tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2534  SMLoc S = Parser.getTok().getLoc();
2535  int RegNo = tryParseRegister();
2536  if (RegNo == -1)
2537    return true;
2538
2539  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2540
2541  const AsmToken &ExclaimTok = Parser.getTok();
2542  if (ExclaimTok.is(AsmToken::Exclaim)) {
2543    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2544                                               ExclaimTok.getLoc()));
2545    Parser.Lex(); // Eat exclaim token
2546    return false;
2547  }
2548
2549  // Also check for an index operand. This is only legal for vector registers,
2550  // but that'll get caught OK in operand matching, so we don't need to
2551  // explicitly filter everything else out here.
2552  if (Parser.getTok().is(AsmToken::LBrac)) {
2553    SMLoc SIdx = Parser.getTok().getLoc();
2554    Parser.Lex(); // Eat left bracket token.
2555
2556    const MCExpr *ImmVal;
2557    if (getParser().ParseExpression(ImmVal))
2558      return true;
2559    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2560    if (!MCE)
2561      return TokError("immediate value expected for vector index");
2562
2563    SMLoc E = Parser.getTok().getLoc();
2564    if (Parser.getTok().isNot(AsmToken::RBrac))
2565      return Error(E, "']' expected");
2566
2567    Parser.Lex(); // Eat right bracket token.
2568
2569    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2570                                                     SIdx, E,
2571                                                     getContext()));
2572  }
2573
2574  return false;
2575}
2576
2577/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2578/// instruction with a symbolic operand name. Examples: "p1", "p7", "c3",
2579/// "c5", ...
2580static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2581  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2582  // but efficient.
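  // e.g., ("p15", 'p') returns 15 and ("c3", 'c') returns 3; anything else
  // returns -1.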
2583  switch (Name.size()) {
2584  default: return -1;
2585  case 2:
2586    if (Name[0] != CoprocOp)
2587      return -1;
2588    switch (Name[1]) {
2589    default:  return -1;
2590    case '0': return 0;
2591    case '1': return 1;
2592    case '2': return 2;
2593    case '3': return 3;
2594    case '4': return 4;
2595    case '5': return 5;
2596    case '6': return 6;
2597    case '7': return 7;
2598    case '8': return 8;
2599    case '9': return 9;
2600    }
2601  case 3:
2602    if (Name[0] != CoprocOp || Name[1] != '1')
2603      return -1;
2604    switch (Name[2]) {
2605    default:  return -1;
2606    case '0': return 10;
2607    case '1': return 11;
2608    case '2': return 12;
2609    case '3': return 13;
2610    case '4': return 14;
2611    case '5': return 15;
2612    }
2613  }
2614}
2615
2616/// parseITCondCode - Try to parse a condition code for an IT instruction.
2617ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2618parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2619  SMLoc S = Parser.getTok().getLoc();
2620  const AsmToken &Tok = Parser.getTok();
2621  if (!Tok.is(AsmToken::Identifier))
2622    return MatchOperand_NoMatch;
2623  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2624    .Case("eq", ARMCC::EQ)
2625    .Case("ne", ARMCC::NE)
2626    .Case("hs", ARMCC::HS)
2627    .Case("cs", ARMCC::HS)
2628    .Case("lo", ARMCC::LO)
2629    .Case("cc", ARMCC::LO)
2630    .Case("mi", ARMCC::MI)
2631    .Case("pl", ARMCC::PL)
2632    .Case("vs", ARMCC::VS)
2633    .Case("vc", ARMCC::VC)
2634    .Case("hi", ARMCC::HI)
2635    .Case("ls", ARMCC::LS)
2636    .Case("ge", ARMCC::GE)
2637    .Case("lt", ARMCC::LT)
2638    .Case("gt", ARMCC::GT)
2639    .Case("le", ARMCC::LE)
2640    .Case("al", ARMCC::AL)
2641    .Default(~0U);
2642  if (CC == ~0U)
2643    return MatchOperand_NoMatch;
2644  Parser.Lex(); // Eat the token.
2645
2646  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2647
2648  return MatchOperand_Success;
2649}
2650
2651/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2652/// token must be an Identifier when called, and if it is a coprocessor
2653/// number, the token is eaten and the operand is added to the operand list.
2654ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2655parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2656  SMLoc S = Parser.getTok().getLoc();
2657  const AsmToken &Tok = Parser.getTok();
2658  if (Tok.isNot(AsmToken::Identifier))
2659    return MatchOperand_NoMatch;
2660
2661  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2662  if (Num == -1)
2663    return MatchOperand_NoMatch;
2664
2665  Parser.Lex(); // Eat identifier token.
2666  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2667  return MatchOperand_Success;
2668}
2669
2670/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2671/// token must be an Identifier when called, and if it is a coprocessor
2672/// register, the token is eaten and the operand is added to the operand list.
2673ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2674parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2675  SMLoc S = Parser.getTok().getLoc();
2676  const AsmToken &Tok = Parser.getTok();
2677  if (Tok.isNot(AsmToken::Identifier))
2678    return MatchOperand_NoMatch;
2679
2680  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2681  if (Reg == -1)
2682    return MatchOperand_NoMatch;
2683
2684  Parser.Lex(); // Eat identifier token.
2685  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2686  return MatchOperand_Success;
2687}
2688
2689/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2690/// coproc_option : '{' imm0_255 '}'
2691ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2692parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2693  SMLoc S = Parser.getTok().getLoc();
2694
2695  // If this isn't a '{', this isn't a coprocessor immediate operand.
2696  if (Parser.getTok().isNot(AsmToken::LCurly))
2697    return MatchOperand_NoMatch;
2698  Parser.Lex(); // Eat the '{'
2699
2700  const MCExpr *Expr;
2701  SMLoc Loc = Parser.getTok().getLoc();
2702  if (getParser().ParseExpression(Expr)) {
2703    Error(Loc, "illegal expression");
2704    return MatchOperand_ParseFail;
2705  }
2706  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2707  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2708    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2709    return MatchOperand_ParseFail;
2710  }
2711  int Val = CE->getValue();
2712
2713  // Check for and consume the closing '}'
2714  if (Parser.getTok().isNot(AsmToken::RCurly))
2715    return MatchOperand_ParseFail;
2716  SMLoc E = Parser.getTok().getLoc();
2717  Parser.Lex(); // Eat the '}'
2718
2719  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2720  return MatchOperand_Success;
2721}
2722
2723// For register list parsing, we need to map from raw GPR register numbering
2724// to the enumeration values. The enumeration values aren't sorted by
2725// register number due to our using "sp", "lr" and "pc" as canonical names.
2726static unsigned getNextRegister(unsigned Reg) {
2727  // If this is a GPR, we need to do it manually, otherwise we can rely
2728  // on the sort ordering of the enumeration since the other reg-classes
2729  // are sane.
2730  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2731    return Reg + 1;
2732  switch(Reg) {
2733  default: llvm_unreachable("Invalid GPR number!");
2734  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2735  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2736  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2737  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2738  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2739  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2740  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2741  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2742  }
2743}
2744
2745// Return the low-subreg of a given Q register.
2746static unsigned getDRegFromQReg(unsigned QReg) {
2747  switch (QReg) {
2748  default: llvm_unreachable("expected a Q register!");
2749  case ARM::Q0:  return ARM::D0;
2750  case ARM::Q1:  return ARM::D2;
2751  case ARM::Q2:  return ARM::D4;
2752  case ARM::Q3:  return ARM::D6;
2753  case ARM::Q4:  return ARM::D8;
2754  case ARM::Q5:  return ARM::D10;
2755  case ARM::Q6:  return ARM::D12;
2756  case ARM::Q7:  return ARM::D14;
2757  case ARM::Q8:  return ARM::D16;
2758  case ARM::Q9:  return ARM::D18;
2759  case ARM::Q10: return ARM::D20;
2760  case ARM::Q11: return ARM::D22;
2761  case ARM::Q12: return ARM::D24;
2762  case ARM::Q13: return ARM::D26;
2763  case ARM::Q14: return ARM::D28;
2764  case ARM::Q15: return ARM::D30;
2765  }
2766}
2767
2768/// Parse a register list.
2769bool ARMAsmParser::
2770parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2771  assert(Parser.getTok().is(AsmToken::LCurly) &&
2772         "Token is not a Left Curly Brace");
2773  SMLoc S = Parser.getTok().getLoc();
2774  Parser.Lex(); // Eat '{' token.
2775  SMLoc RegLoc = Parser.getTok().getLoc();
2776
2777  // Check the first register in the list to see what register class
2778  // this is a list of.
2779  int Reg = tryParseRegister();
2780  if (Reg == -1)
2781    return Error(RegLoc, "register expected");
2782
2783  // The reglist instructions have at most 16 registers, so reserve
2784  // space for that many.
2785  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2786
2787  // Allow Q regs and just interpret them as the two D sub-registers.
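  // e.g., {q0, q1} is accepted and recorded as {d0, d1, d2, d3}.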
2788  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2789    Reg = getDRegFromQReg(Reg);
2790    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2791    ++Reg;
2792  }
2793  const MCRegisterClass *RC;
2794  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2795    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2796  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2797    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2798  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2799    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2800  else
2801    return Error(RegLoc, "invalid register in register list");
2802
2803  // Store the register.
2804  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2805
2806  // This starts immediately after the first register token in the list,
2807  // so we can see either a comma or a minus (range separator) as a legal
2808  // next token.
2809  while (Parser.getTok().is(AsmToken::Comma) ||
2810         Parser.getTok().is(AsmToken::Minus)) {
2811    if (Parser.getTok().is(AsmToken::Minus)) {
2812      Parser.Lex(); // Eat the minus.
2813      SMLoc EndLoc = Parser.getTok().getLoc();
2814      int EndReg = tryParseRegister();
2815      if (EndReg == -1)
2816        return Error(EndLoc, "register expected");
2817      // Allow Q regs and just interpret them as the two D sub-registers.
2818      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2819        EndReg = getDRegFromQReg(EndReg) + 1;
2820      // If the register is the same as the start reg, there's nothing
2821      // more to do.
2822      if (Reg == EndReg)
2823        continue;
2824      // The register must be in the same register class as the first.
2825      if (!RC->contains(EndReg))
2826        return Error(EndLoc, "invalid register in register list");
2827      // Ranges must go from low to high.
2828      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2829        return Error(EndLoc, "bad range in register list");
2830
2831      // Add all the registers in the range to the register list.
2832      while (Reg != EndReg) {
2833        Reg = getNextRegister(Reg);
2834        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2835      }
2836      continue;
2837    }
2838    Parser.Lex(); // Eat the comma.
2839    RegLoc = Parser.getTok().getLoc();
2840    int OldReg = Reg;
2841    const AsmToken RegTok = Parser.getTok();
2842    Reg = tryParseRegister();
2843    if (Reg == -1)
2844      return Error(RegLoc, "register expected");
2845    // Allow Q regs and just interpret them as the two D sub-registers.
2846    bool isQReg = false;
2847    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2848      Reg = getDRegFromQReg(Reg);
2849      isQReg = true;
2850    }
2851    // The register must be in the same register class as the first.
2852    if (!RC->contains(Reg))
2853      return Error(RegLoc, "invalid register in register list");
2854    // List must be monotonically increasing.
2855    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2856      return Error(RegLoc, "register list not in ascending order");
2857    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2858      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2859              ") in register list");
2860      continue;
2861    }
2862    // VFP register lists must also be contiguous.
2863    // It's OK to use the enumeration values directly here, as the
2864    // VFP register classes have the enum sorted properly.
2865    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2866        Reg != OldReg + 1)
2867      return Error(RegLoc, "non-contiguous register range");
2868    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2869    if (isQReg)
2870      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2871  }
2872
2873  SMLoc E = Parser.getTok().getLoc();
2874  if (Parser.getTok().isNot(AsmToken::RCurly))
2875    return Error(E, "'}' expected");
2876  Parser.Lex(); // Eat '}' token.
2877
2878  // Push the register list operand.
2879  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2880
2881  // The ARM system instruction variants for LDM/STM have a '^' token here.
2882  if (Parser.getTok().is(AsmToken::Caret)) {
2883    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2884    Parser.Lex(); // Eat '^' token.
2885  }
2886
2887  return false;
2888}
2889
2890// Helper function to parse the lane index for vector lists.
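// A lane specifier is either "[]" (all lanes) or "[<n>]" (a single indexed
// lane), e.g. the "[1]" in "vld1.32 {d0[1]}, [r0]" (illustrative syntax).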
2891ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2892parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2893  Index = 0; // Always return a defined index value.
2894  if (Parser.getTok().is(AsmToken::LBrac)) {
2895    Parser.Lex(); // Eat the '['.
2896    if (Parser.getTok().is(AsmToken::RBrac)) {
2897      // "Dn[]" is the 'all lanes' syntax.
2898      LaneKind = AllLanes;
2899      Parser.Lex(); // Eat the ']'.
2900      return MatchOperand_Success;
2901    }
2902    const MCExpr *LaneIndex;
2903    SMLoc Loc = Parser.getTok().getLoc();
2904    if (getParser().ParseExpression(LaneIndex)) {
2905      Error(Loc, "illegal expression");
2906      return MatchOperand_ParseFail;
2907    }
2908    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2909    if (!CE) {
2910      Error(Loc, "lane index must be empty or an integer");
2911      return MatchOperand_ParseFail;
2912    }
2913    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2914      Error(Parser.getTok().getLoc(), "']' expected");
2915      return MatchOperand_ParseFail;
2916    }
2917    Parser.Lex(); // Eat the ']'.
2918    int64_t Val = CE->getValue();
2919
2920    // FIXME: Make this range check context sensitive for .8, .16, .32.
2921    if (Val < 0 || Val > 7) {
2922      Error(Parser.getTok().getLoc(), "lane index out of range");
2923      return MatchOperand_ParseFail;
2924    }
2925    Index = Val;
2926    LaneKind = IndexedLane;
2927    return MatchOperand_Success;
2928  }
2929  LaneKind = NoLanes;
2930  return MatchOperand_Success;
2931}
2932
2933/// parseVectorList - Parse a vector register list.
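/// e.g. "{d0, d1, d2, d3}", "{q0}" (parsed as {d0, d1}), "{d0[], d1[]}", or a
/// double-spaced indexed list such as "{d0[2], d2[2]}" (illustrative forms).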
2934ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2935parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2936  VectorLaneTy LaneKind;
2937  unsigned LaneIndex;
2938  SMLoc S = Parser.getTok().getLoc();
2939  // As an extension (to match gas), support a plain D register or Q register
2940  // (without enclosing curly braces) as a single- or double-entry list,
2941  // respectively.
2942  if (Parser.getTok().is(AsmToken::Identifier)) {
2943    int Reg = tryParseRegister();
2944    if (Reg == -1)
2945      return MatchOperand_NoMatch;
2946    SMLoc E = Parser.getTok().getLoc();
2947    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2948      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2949      if (Res != MatchOperand_Success)
2950        return Res;
2951      switch (LaneKind) {
2952      case NoLanes:
2953        E = Parser.getTok().getLoc();
2954        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2955        break;
2956      case AllLanes:
2957        E = Parser.getTok().getLoc();
2958        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
2959                                                                S, E));
2960        break;
2961      case IndexedLane:
2962        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2963                                                               LaneIndex,
2964                                                               false, S, E));
2965        break;
2966      }
2967      return MatchOperand_Success;
2968    }
2969    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2970      Reg = getDRegFromQReg(Reg);
2971      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2972      if (Res != MatchOperand_Success)
2973        return Res;
2974      switch (LaneKind) {
2975      case NoLanes:
2976        E = Parser.getTok().getLoc();
2977        // VLD1 wants a DPair register.
2978        // FIXME: Make the rest of the two-reg instructions want the same
2979        // thing.
2980        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
2981                                      &ARMMCRegisterClasses[ARM::DPairRegClassID]);
2982
2983        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2984        break;
2985      case AllLanes:
2986        E = Parser.getTok().getLoc();
2987        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
2988                                                                S, E));
2989        break;
2990      case IndexedLane:
2991        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2992                                                               LaneIndex,
2993                                                               false, S, E));
2994        break;
2995      }
2996      return MatchOperand_Success;
2997    }
2998    Error(S, "vector register expected");
2999    return MatchOperand_ParseFail;
3000  }
3001
3002  if (Parser.getTok().isNot(AsmToken::LCurly))
3003    return MatchOperand_NoMatch;
3004
3005  Parser.Lex(); // Eat '{' token.
3006  SMLoc RegLoc = Parser.getTok().getLoc();
3007
3008  int Reg = tryParseRegister();
3009  if (Reg == -1) {
3010    Error(RegLoc, "register expected");
3011    return MatchOperand_ParseFail;
3012  }
3013  unsigned Count = 1;
3014  int Spacing = 0;
3015  unsigned FirstReg = Reg;
3016  // The list is of D registers, but we also allow Q regs and just interpret
3017  // them as the two D sub-registers.
3018  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3019    FirstReg = Reg = getDRegFromQReg(Reg);
3020    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3021                 // it's ambiguous with four-register single spaced.
3022    ++Reg;
3023    ++Count;
3024  }
3025  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3026    return MatchOperand_ParseFail;
3027
3028  while (Parser.getTok().is(AsmToken::Comma) ||
3029         Parser.getTok().is(AsmToken::Minus)) {
3030    if (Parser.getTok().is(AsmToken::Minus)) {
3031      if (!Spacing)
3032        Spacing = 1; // Register range implies a single spaced list.
3033      else if (Spacing == 2) {
3034        Error(Parser.getTok().getLoc(),
3035              "sequential registers in double spaced list");
3036        return MatchOperand_ParseFail;
3037      }
3038      Parser.Lex(); // Eat the minus.
3039      SMLoc EndLoc = Parser.getTok().getLoc();
3040      int EndReg = tryParseRegister();
3041      if (EndReg == -1) {
3042        Error(EndLoc, "register expected");
3043        return MatchOperand_ParseFail;
3044      }
3045      // Allow Q regs and just interpret them as the two D sub-registers.
3046      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3047        EndReg = getDRegFromQReg(EndReg) + 1;
3048      // If the register is the same as the start reg, there's nothing
3049      // more to do.
3050      if (Reg == EndReg)
3051        continue;
3052      // The register must be in the same register class as the first.
3053      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3054        Error(EndLoc, "invalid register in register list");
3055        return MatchOperand_ParseFail;
3056      }
3057      // Ranges must go from low to high.
3058      if (Reg > EndReg) {
3059        Error(EndLoc, "bad range in register list");
3060        return MatchOperand_ParseFail;
3061      }
3062      // Parse the lane specifier if present.
3063      VectorLaneTy NextLaneKind;
3064      unsigned NextLaneIndex;
3065      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3066        return MatchOperand_ParseFail;
3067      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3068        Error(EndLoc, "mismatched lane index in register list");
3069        return MatchOperand_ParseFail;
3070      }
3071      EndLoc = Parser.getTok().getLoc();
3072
3073      // Add all the registers in the range to the register list.
3074      Count += EndReg - Reg;
3075      Reg = EndReg;
3076      continue;
3077    }
3078    Parser.Lex(); // Eat the comma.
3079    RegLoc = Parser.getTok().getLoc();
3080    int OldReg = Reg;
3081    Reg = tryParseRegister();
3082    if (Reg == -1) {
3083      Error(RegLoc, "register expected");
3084      return MatchOperand_ParseFail;
3085    }
3086    // Vector register lists must be contiguous.
3087    // It's OK to use the enumeration values directly here, as the
3088    // VFP register classes have the enum sorted properly.
3089    //
3090    // The list is of D registers, but we also allow Q regs and just interpret
3091    // them as the two D sub-registers.
3092    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3093      if (!Spacing)
3094        Spacing = 1; // Register range implies a single spaced list.
3095      else if (Spacing == 2) {
3096        Error(RegLoc,
3097              "invalid register in double-spaced list (must be 'D' register')");
3098        return MatchOperand_ParseFail;
3099      }
3100      Reg = getDRegFromQReg(Reg);
3101      if (Reg != OldReg + 1) {
3102        Error(RegLoc, "non-contiguous register range");
3103        return MatchOperand_ParseFail;
3104      }
3105      ++Reg;
3106      Count += 2;
3107      // Parse the lane specifier if present.
3108      VectorLaneTy NextLaneKind;
3109      unsigned NextLaneIndex;
3110      SMLoc EndLoc = Parser.getTok().getLoc();
3111      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3112        return MatchOperand_ParseFail;
3113      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3114        Error(EndLoc, "mismatched lane index in register list");
3115        return MatchOperand_ParseFail;
3116      }
3117      continue;
3118    }
3119    // Normal D register.
3120    // Figure out the register spacing (single or double) of the list if
3121    // we don't know it already.
3122    if (!Spacing)
3123      Spacing = 1 + (Reg == OldReg + 2);
3124
3125    // Just check that it's contiguous and keep going.
3126    if (Reg != OldReg + Spacing) {
3127      Error(RegLoc, "non-contiguous register range");
3128      return MatchOperand_ParseFail;
3129    }
3130    ++Count;
3131    // Parse the lane specifier if present.
3132    VectorLaneTy NextLaneKind;
3133    unsigned NextLaneIndex;
3134    SMLoc EndLoc = Parser.getTok().getLoc();
3135    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3136      return MatchOperand_ParseFail;
3137    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3138      Error(EndLoc, "mismatched lane index in register list");
3139      return MatchOperand_ParseFail;
3140    }
3141  }
3142
3143  SMLoc E = Parser.getTok().getLoc();
3144  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3145    Error(E, "'}' expected");
3146    return MatchOperand_ParseFail;
3147  }
3148  Parser.Lex(); // Eat '}' token.
3149
3150  switch (LaneKind) {
3151  case NoLanes:
3152    if (Count == 2 && Spacing == 1)
3153      // VLD1 wants a DPair register.
3154      // FIXME: Make the rest of the two-reg instructions want the same
3155      // thing.
3156      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0,
3157                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3158
3159
3160    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3161                                                    (Spacing == 2), S, E));
3162    break;
3163  case AllLanes:
3164    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3165                                                            (Spacing == 2),
3166                                                            S, E));
3167    break;
3168  case IndexedLane:
3169    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3170                                                           LaneIndex,
3171                                                           (Spacing == 2),
3172                                                           S, E));
3173    break;
3174  }
3175  return MatchOperand_Success;
3176}
3177
3178/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
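/// e.g. the "ish" in "dmb ish" or the "sy" in "dsb sy"; legacy spellings such
/// as "sh" and "un" map to the same encodings (illustrative examples).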
3179ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3180parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3181  SMLoc S = Parser.getTok().getLoc();
3182  const AsmToken &Tok = Parser.getTok();
3183  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3184  StringRef OptStr = Tok.getString();
3185
3186  unsigned Opt = StringSwitch<unsigned>(OptStr)
3187    .Case("sy",    ARM_MB::SY)
3188    .Case("st",    ARM_MB::ST)
3189    .Case("sh",    ARM_MB::ISH)
3190    .Case("ish",   ARM_MB::ISH)
3191    .Case("shst",  ARM_MB::ISHST)
3192    .Case("ishst", ARM_MB::ISHST)
3193    .Case("nsh",   ARM_MB::NSH)
3194    .Case("un",    ARM_MB::NSH)
3195    .Case("nshst", ARM_MB::NSHST)
3196    .Case("unst",  ARM_MB::NSHST)
3197    .Case("osh",   ARM_MB::OSH)
3198    .Case("oshst", ARM_MB::OSHST)
3199    .Default(~0U);
3200
3201  if (Opt == ~0U)
3202    return MatchOperand_NoMatch;
3203
3204  Parser.Lex(); // Eat identifier token.
3205  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3206  return MatchOperand_Success;
3207}
3208
3209/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
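/// e.g. the "if" in "cpsid if" or the "aif" in "cpsie aif"; the string "none"
/// selects no AIF bits at all (illustrative examples).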
3210ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3211parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3212  SMLoc S = Parser.getTok().getLoc();
3213  const AsmToken &Tok = Parser.getTok();
3214  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3215  StringRef IFlagsStr = Tok.getString();
3216
3217  // An iflags string of "none" is interpreted to mean that none of the AIF
3218  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3219  unsigned IFlags = 0;
3220  if (IFlagsStr != "none") {
3221    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3222      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3223        .Case("a", ARM_PROC::A)
3224        .Case("i", ARM_PROC::I)
3225        .Case("f", ARM_PROC::F)
3226        .Default(~0U);
3227
3228      // If some specific iflag is already set, it means that some letter is
3229      // present more than once, which is not acceptable.
3230      if (Flag == ~0U || (IFlags & Flag))
3231        return MatchOperand_NoMatch;
3232
3233      IFlags |= Flag;
3234    }
3235  }
3236
3237  Parser.Lex(); // Eat identifier token.
3238  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3239  return MatchOperand_Success;
3240}
3241
3242/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
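/// e.g. the "apsr_nzcvq" in "msr apsr_nzcvq, r0" or the "cpsr_fc" in
/// "msr cpsr_fc, r1"; on M-class cores the mask is a system register name such
/// as "primask" or "basepri" (illustrative examples).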
3243ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3244parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3245  SMLoc S = Parser.getTok().getLoc();
3246  const AsmToken &Tok = Parser.getTok();
3247  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3248  StringRef Mask = Tok.getString();
3249
3250  if (isMClass()) {
3251    // See ARMv6-M 10.1.1
3252    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3253      .Case("apsr", 0)
3254      .Case("iapsr", 1)
3255      .Case("eapsr", 2)
3256      .Case("xpsr", 3)
3257      .Case("ipsr", 5)
3258      .Case("epsr", 6)
3259      .Case("iepsr", 7)
3260      .Case("msp", 8)
3261      .Case("psp", 9)
3262      .Case("primask", 16)
3263      .Case("basepri", 17)
3264      .Case("basepri_max", 18)
3265      .Case("faultmask", 19)
3266      .Case("control", 20)
3267      .Default(~0U);
3268
3269    if (FlagsVal == ~0U)
3270      return MatchOperand_NoMatch;
3271
3272    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3273      // basepri, basepri_max and faultmask are only valid for v7-M.
3274      return MatchOperand_NoMatch;
3275
3276    Parser.Lex(); // Eat identifier token.
3277    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3278    return MatchOperand_Success;
3279  }
3280
3281  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3282  size_t Start = 0, Next = Mask.find('_');
3283  StringRef Flags = "";
3284  std::string SpecReg = Mask.slice(Start, Next).lower();
3285  if (Next != StringRef::npos)
3286    Flags = Mask.slice(Next+1, Mask.size());
3287
3288  // FlagsVal contains the complete mask:
3289  // 3-0: Mask
3290  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3291  unsigned FlagsVal = 0;
3292
3293  if (SpecReg == "apsr") {
3294    FlagsVal = StringSwitch<unsigned>(Flags)
3295    .Case("nzcvq",  0x8) // same as CPSR_f
3296    .Case("g",      0x4) // same as CPSR_s
3297    .Case("nzcvqg", 0xc) // same as CPSR_fs
3298    .Default(~0U);
3299
3300    if (FlagsVal == ~0U) {
3301      if (!Flags.empty())
3302        return MatchOperand_NoMatch;
3303      else
3304        FlagsVal = 8; // No flag
3305    }
3306  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3307    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3308      Flags = "fc";
3309    for (int i = 0, e = Flags.size(); i != e; ++i) {
3310      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3311      .Case("c", 1)
3312      .Case("x", 2)
3313      .Case("s", 4)
3314      .Case("f", 8)
3315      .Default(~0U);
3316
3317      // If some specific flag is already set, it means that some letter is
3318      // present more than once, which is not acceptable.
3319      if (Flag == ~0U || (FlagsVal & Flag))
3320        return MatchOperand_NoMatch;
3321      FlagsVal |= Flag;
3322    }
3323  } else // No match for special register.
3324    return MatchOperand_NoMatch;
3325
3326  // Special register without flags is NOT equivalent to "fc" flags.
3327  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3328  // two lines would enable gas compatibility at the expense of breaking
3329  // round-tripping.
3330  //
3331  // if (!FlagsVal)
3332  //  FlagsVal = 0x9;
3333
3334  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3335  if (SpecReg == "spsr")
3336    FlagsVal |= 16;
3337
3338  Parser.Lex(); // Eat identifier token.
3339  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3340  return MatchOperand_Success;
3341}
3342
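/// parsePKHImm - Parse the shift specifier used by the PKH instructions,
/// e.g. the "lsl #8" in "pkhbt r0, r1, r2, lsl #8" or the "asr #16" in
/// "pkhtb r0, r1, r2, asr #16" (illustrative; the caller supplies the expected
/// operator name and the legal [Low, High] range).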
3343ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3344parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3345            int Low, int High) {
3346  const AsmToken &Tok = Parser.getTok();
3347  if (Tok.isNot(AsmToken::Identifier)) {
3348    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3349    return MatchOperand_ParseFail;
3350  }
3351  StringRef ShiftName = Tok.getString();
3352  std::string LowerOp = Op.lower();
3353  std::string UpperOp = Op.upper();
3354  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3355    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3356    return MatchOperand_ParseFail;
3357  }
3358  Parser.Lex(); // Eat shift type token.
3359
3360  // There must be a '#' and a shift amount.
3361  if (Parser.getTok().isNot(AsmToken::Hash) &&
3362      Parser.getTok().isNot(AsmToken::Dollar)) {
3363    Error(Parser.getTok().getLoc(), "'#' expected");
3364    return MatchOperand_ParseFail;
3365  }
3366  Parser.Lex(); // Eat hash token.
3367
3368  const MCExpr *ShiftAmount;
3369  SMLoc Loc = Parser.getTok().getLoc();
3370  if (getParser().ParseExpression(ShiftAmount)) {
3371    Error(Loc, "illegal expression");
3372    return MatchOperand_ParseFail;
3373  }
3374  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3375  if (!CE) {
3376    Error(Loc, "constant expression expected");
3377    return MatchOperand_ParseFail;
3378  }
3379  int Val = CE->getValue();
3380  if (Val < Low || Val > High) {
3381    Error(Loc, "immediate value out of range");
3382    return MatchOperand_ParseFail;
3383  }
3384
3385  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3386
3387  return MatchOperand_Success;
3388}
3389
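/// parseSetEndImm - Parse the endianness specifier for a SETEND instruction,
/// i.e. the "be" or "le" in "setend be" / "setend le".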
3390ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3391parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3392  const AsmToken &Tok = Parser.getTok();
3393  SMLoc S = Tok.getLoc();
3394  if (Tok.isNot(AsmToken::Identifier)) {
3395    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3396    return MatchOperand_ParseFail;
3397  }
3398  int Val = StringSwitch<int>(Tok.getString())
3399    .Case("be", 1)
3400    .Case("le", 0)
3401    .Default(-1);
3402  Parser.Lex(); // Eat the token.
3403
3404  if (Val == -1) {
3405    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3406    return MatchOperand_ParseFail;
3407  }
3408  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3409                                                                  getContext()),
3410                                           S, Parser.getTok().getLoc()));
3411  return MatchOperand_Success;
3412}
3413
3414/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3415/// instructions. Legal values are:
3416///     lsl #n  'n' in [0,31]
3417///     asr #n  'n' in [1,32]
3418///             n == 32 encoded as n == 0.
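/// e.g. the "lsl #4" in "ssat r0, #8, r1, lsl #4" or the "asr #2" in
/// "usat r0, #7, r1, asr #2" (illustrative encodings).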
3419ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3420parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3421  const AsmToken &Tok = Parser.getTok();
3422  SMLoc S = Tok.getLoc();
3423  if (Tok.isNot(AsmToken::Identifier)) {
3424    Error(S, "shift operator 'asr' or 'lsl' expected");
3425    return MatchOperand_ParseFail;
3426  }
3427  StringRef ShiftName = Tok.getString();
3428  bool isASR;
3429  if (ShiftName == "lsl" || ShiftName == "LSL")
3430    isASR = false;
3431  else if (ShiftName == "asr" || ShiftName == "ASR")
3432    isASR = true;
3433  else {
3434    Error(S, "shift operator 'asr' or 'lsl' expected");
3435    return MatchOperand_ParseFail;
3436  }
3437  Parser.Lex(); // Eat the operator.
3438
3439  // A '#' and a shift amount.
3440  if (Parser.getTok().isNot(AsmToken::Hash) &&
3441      Parser.getTok().isNot(AsmToken::Dollar)) {
3442    Error(Parser.getTok().getLoc(), "'#' expected");
3443    return MatchOperand_ParseFail;
3444  }
3445  Parser.Lex(); // Eat hash token.
3446
3447  const MCExpr *ShiftAmount;
3448  SMLoc E = Parser.getTok().getLoc();
3449  if (getParser().ParseExpression(ShiftAmount)) {
3450    Error(E, "malformed shift expression");
3451    return MatchOperand_ParseFail;
3452  }
3453  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3454  if (!CE) {
3455    Error(E, "shift amount must be an immediate");
3456    return MatchOperand_ParseFail;
3457  }
3458
3459  int64_t Val = CE->getValue();
3460  if (isASR) {
3461    // Shift amount must be in [1,32]
3462    if (Val < 1 || Val > 32) {
3463      Error(E, "'asr' shift amount must be in range [1,32]");
3464      return MatchOperand_ParseFail;
3465    }
3466    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3467    if (isThumb() && Val == 32) {
3468      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3469      return MatchOperand_ParseFail;
3470    }
3471    if (Val == 32) Val = 0;
3472  } else {
3473    // Shift amount must be in [0,31]
3474    if (Val < 0 || Val > 31) {
3475      Error(E, "'lsl' shift amount must be in range [0,31]");
3476      return MatchOperand_ParseFail;
3477    }
3478  }
3479
3480  E = Parser.getTok().getLoc();
3481  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3482
3483  return MatchOperand_Success;
3484}
3485
3486/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3487/// of instructions. Legal values are:
3488///     ror #n  'n' in {0, 8, 16, 24}
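/// e.g. the "ror #8" in "sxtb r0, r1, ror #8" or the "ror #16" in
/// "uxtab r0, r1, r2, ror #16" (illustrative).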
3489ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3490parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3491  const AsmToken &Tok = Parser.getTok();
3492  SMLoc S = Tok.getLoc();
3493  if (Tok.isNot(AsmToken::Identifier))
3494    return MatchOperand_NoMatch;
3495  StringRef ShiftName = Tok.getString();
3496  if (ShiftName != "ror" && ShiftName != "ROR")
3497    return MatchOperand_NoMatch;
3498  Parser.Lex(); // Eat the operator.
3499
3500  // A '#' and a rotate amount.
3501  if (Parser.getTok().isNot(AsmToken::Hash) &&
3502      Parser.getTok().isNot(AsmToken::Dollar)) {
3503    Error(Parser.getTok().getLoc(), "'#' expected");
3504    return MatchOperand_ParseFail;
3505  }
3506  Parser.Lex(); // Eat hash token.
3507
3508  const MCExpr *ShiftAmount;
3509  SMLoc E = Parser.getTok().getLoc();
3510  if (getParser().ParseExpression(ShiftAmount)) {
3511    Error(E, "malformed rotate expression");
3512    return MatchOperand_ParseFail;
3513  }
3514  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3515  if (!CE) {
3516    Error(E, "rotate amount must be an immediate");
3517    return MatchOperand_ParseFail;
3518  }
3519
3520  int64_t Val = CE->getValue();
3521  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3522  // normally, zero is represented in asm by omitting the rotate operand
3523  // entirely.
3524  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3525    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3526    return MatchOperand_ParseFail;
3527  }
3528
3529  E = Parser.getTok().getLoc();
3530  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3531
3532  return MatchOperand_Success;
3533}
3534
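/// parseBitfield - Parse the "#lsb, #width" operand pair used by the bitfield
/// instructions, e.g. the "#4, #8" in "bfi r0, r1, #4, #8" (lsb 4, width 8;
/// illustrative).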
3535ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3536parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3537  SMLoc S = Parser.getTok().getLoc();
3538  // The bitfield descriptor is really two operands, the LSB and the width.
3539  if (Parser.getTok().isNot(AsmToken::Hash) &&
3540      Parser.getTok().isNot(AsmToken::Dollar)) {
3541    Error(Parser.getTok().getLoc(), "'#' expected");
3542    return MatchOperand_ParseFail;
3543  }
3544  Parser.Lex(); // Eat hash token.
3545
3546  const MCExpr *LSBExpr;
3547  SMLoc E = Parser.getTok().getLoc();
3548  if (getParser().ParseExpression(LSBExpr)) {
3549    Error(E, "malformed immediate expression");
3550    return MatchOperand_ParseFail;
3551  }
3552  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3553  if (!CE) {
3554    Error(E, "'lsb' operand must be an immediate");
3555    return MatchOperand_ParseFail;
3556  }
3557
3558  int64_t LSB = CE->getValue();
3559  // The LSB must be in the range [0,31]
3560  if (LSB < 0 || LSB > 31) {
3561    Error(E, "'lsb' operand must be in the range [0,31]");
3562    return MatchOperand_ParseFail;
3563  }
3564  E = Parser.getTok().getLoc();
3565
3566  // Expect another immediate operand.
3567  if (Parser.getTok().isNot(AsmToken::Comma)) {
3568    Error(Parser.getTok().getLoc(), "too few operands");
3569    return MatchOperand_ParseFail;
3570  }
3571  Parser.Lex(); // Eat comma token.
3572  if (Parser.getTok().isNot(AsmToken::Hash) &&
3573      Parser.getTok().isNot(AsmToken::Dollar)) {
3574    Error(Parser.getTok().getLoc(), "'#' expected");
3575    return MatchOperand_ParseFail;
3576  }
3577  Parser.Lex(); // Eat hash token.
3578
3579  const MCExpr *WidthExpr;
3580  if (getParser().ParseExpression(WidthExpr)) {
3581    Error(E, "malformed immediate expression");
3582    return MatchOperand_ParseFail;
3583  }
3584  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3585  if (!CE) {
3586    Error(E, "'width' operand must be an immediate");
3587    return MatchOperand_ParseFail;
3588  }
3589
3590  int64_t Width = CE->getValue();
3591  // The width must be in the range [1,32-lsb]
3592  if (Width < 1 || Width > 32 - LSB) {
3593    Error(E, "'width' operand must be in the range [1,32-lsb]");
3594    return MatchOperand_ParseFail;
3595  }
3596  E = Parser.getTok().getLoc();
3597
3598  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3599
3600  return MatchOperand_Success;
3601}
3602
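/// parsePostIdxReg - Parse a post-indexed register offset, e.g. the
/// "r2, lsl #2" in "ldr r0, [r1], r2, lsl #2" or the "-r2" in
/// "str r0, [r1], -r2" (illustrative).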
3603ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3604parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3605  // Check for a post-index addressing register operand. Specifically:
3606  // postidx_reg := '+' register {, shift}
3607  //              | '-' register {, shift}
3608  //              | register {, shift}
3609
3610  // This method must return MatchOperand_NoMatch without consuming any tokens
3611  // in the case where there is no match, as other alternatives take other
3612  // parse methods.
3613  AsmToken Tok = Parser.getTok();
3614  SMLoc S = Tok.getLoc();
3615  bool haveEaten = false;
3616  bool isAdd = true;
3617  int Reg = -1;
3618  if (Tok.is(AsmToken::Plus)) {
3619    Parser.Lex(); // Eat the '+' token.
3620    haveEaten = true;
3621  } else if (Tok.is(AsmToken::Minus)) {
3622    Parser.Lex(); // Eat the '-' token.
3623    isAdd = false;
3624    haveEaten = true;
3625  }
3626  if (Parser.getTok().is(AsmToken::Identifier))
3627    Reg = tryParseRegister();
3628  if (Reg == -1) {
3629    if (!haveEaten)
3630      return MatchOperand_NoMatch;
3631    Error(Parser.getTok().getLoc(), "register expected");
3632    return MatchOperand_ParseFail;
3633  }
3634  SMLoc E = Parser.getTok().getLoc();
3635
3636  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3637  unsigned ShiftImm = 0;
3638  if (Parser.getTok().is(AsmToken::Comma)) {
3639    Parser.Lex(); // Eat the ','.
3640    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3641      return MatchOperand_ParseFail;
3642  }
3643
3644  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3645                                                  ShiftImm, S, E));
3646
3647  return MatchOperand_Success;
3648}
3649
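/// parseAM3Offset - Parse a post-indexed addrmode3 offset, e.g. the "#4" in
/// "ldrh r0, [r1], #4" or the "-r3" in "ldrd r0, r1, [r2], -r3" (illustrative).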
3650ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3651parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3652  // Check for a post-index addressing register operand. Specifically:
3653  // am3offset := '+' register
3654  //              | '-' register
3655  //              | register
3656  //              | # imm
3657  //              | # + imm
3658  //              | # - imm
3659
3660  // This method must return MatchOperand_NoMatch without consuming any tokens
3661  // in the case where there is no match, as other alternatives take other
3662  // parse methods.
3663  AsmToken Tok = Parser.getTok();
3664  SMLoc S = Tok.getLoc();
3665
3666  // Do immediates first, as we always parse those if we have a '#'.
3667  if (Parser.getTok().is(AsmToken::Hash) ||
3668      Parser.getTok().is(AsmToken::Dollar)) {
3669    Parser.Lex(); // Eat the '#'.
3670    // Explicitly look for a '-', as we need to encode negative zero
3671    // differently.
3672    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3673    const MCExpr *Offset;
3674    if (getParser().ParseExpression(Offset))
3675      return MatchOperand_ParseFail;
3676    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3677    if (!CE) {
3678      Error(S, "constant expression expected");
3679      return MatchOperand_ParseFail;
3680    }
3681    SMLoc E = Tok.getLoc();
3682    // Negative zero is encoded as the flag value INT32_MIN.
3683    int32_t Val = CE->getValue();
3684    if (isNegative && Val == 0)
3685      Val = INT32_MIN;
3686
3687    Operands.push_back(
3688      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3689
3690    return MatchOperand_Success;
3691  }
3692
3693
3694  bool haveEaten = false;
3695  bool isAdd = true;
3696  int Reg = -1;
3697  if (Tok.is(AsmToken::Plus)) {
3698    Parser.Lex(); // Eat the '+' token.
3699    haveEaten = true;
3700  } else if (Tok.is(AsmToken::Minus)) {
3701    Parser.Lex(); // Eat the '-' token.
3702    isAdd = false;
3703    haveEaten = true;
3704  }
3705  if (Parser.getTok().is(AsmToken::Identifier))
3706    Reg = tryParseRegister();
3707  if (Reg == -1) {
3708    if (!haveEaten)
3709      return MatchOperand_NoMatch;
3710    Error(Parser.getTok().getLoc(), "register expected");
3711    return MatchOperand_ParseFail;
3712  }
3713  SMLoc E = Parser.getTok().getLoc();
3714
3715  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3716                                                  0, S, E));
3717
3718  return MatchOperand_Success;
3719}
3720
3721/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3722/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3723/// when they refer to multiple MIOperands inside a single one.
3724bool ARMAsmParser::
3725cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3726             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3727  // Rt, Rt2
3728  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3729  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3730  // Create a writeback register dummy placeholder.
3731  Inst.addOperand(MCOperand::CreateReg(0));
3732  // addr
3733  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3734  // pred
3735  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3736  return true;
3737}
3738
3739/// cvtT2StrdPre - Convert parsed operands to MCInst.
3740/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3741/// when they refer to multiple MIOperands inside a single one.
3742bool ARMAsmParser::
3743cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3744             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3745  // Create a writeback register dummy placeholder.
3746  Inst.addOperand(MCOperand::CreateReg(0));
3747  // Rt, Rt2
3748  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3749  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3750  // addr
3751  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3752  // pred
3753  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3754  return true;
3755}
3756
3757/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3758/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3759/// when they refer to multiple MIOperands inside a single one.
3760bool ARMAsmParser::
3761cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3762                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3763  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3764
3765  // Create a writeback register dummy placeholder.
3766  Inst.addOperand(MCOperand::CreateImm(0));
3767
3768  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3769  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3770  return true;
3771}
3772
3773/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3774/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3775/// when they refer to multiple MIOperands inside a single one.
3776bool ARMAsmParser::
3777cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3778                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3779  // Create a writeback register dummy placeholder.
3780  Inst.addOperand(MCOperand::CreateImm(0));
3781  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3782  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3783  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3784  return true;
3785}
3786
3787/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3788/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3789/// when they refer to multiple MIOperands inside a single one.
3790bool ARMAsmParser::
3791cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3792                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3793  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3794
3795  // Create a writeback register dummy placeholder.
3796  Inst.addOperand(MCOperand::CreateImm(0));
3797
3798  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3799  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3800  return true;
3801}
3802
3803/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3804/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3805/// when they refer to multiple MIOperands inside a single one.
3806bool ARMAsmParser::
3807cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3808                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3809  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3810
3811  // Create a writeback register dummy placeholder.
3812  Inst.addOperand(MCOperand::CreateImm(0));
3813
3814  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3815  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3816  return true;
3817}
3818
3819
3820/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3821/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3822/// when they refer to multiple MIOperands inside a single one.
3823bool ARMAsmParser::
3824cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3825                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3826  // Create a writeback register dummy placeholder.
3827  Inst.addOperand(MCOperand::CreateImm(0));
3828  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3829  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3830  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3831  return true;
3832}
3833
3834/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3835/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3836/// when they refer to multiple MIOperands inside a single one.
3837bool ARMAsmParser::
3838cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3839                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3840  // Create a writeback register dummy placeholder.
3841  Inst.addOperand(MCOperand::CreateImm(0));
3842  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3843  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3844  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3845  return true;
3846}
3847
3848/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3849/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3850/// when they refer to multiple MIOperands inside a single one.
3851bool ARMAsmParser::
3852cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3853                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3854  // Create a writeback register dummy placeholder.
3855  Inst.addOperand(MCOperand::CreateImm(0));
3856  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3857  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3858  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3859  return true;
3860}
3861
3862/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3863/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3864/// when they refer to multiple MIOperands inside a single one.
3865bool ARMAsmParser::
3866cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3867                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3868  // Rt
3869  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3870  // Create a writeback register dummy placeholder.
3871  Inst.addOperand(MCOperand::CreateImm(0));
3872  // addr
3873  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3874  // offset
3875  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3876  // pred
3877  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3878  return true;
3879}
3880
3881/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3882/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3883/// when they refer to multiple MIOperands inside a single one.
3884bool ARMAsmParser::
3885cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3886                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3887  // Rt
3888  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3889  // Create a writeback register dummy placeholder.
3890  Inst.addOperand(MCOperand::CreateImm(0));
3891  // addr
3892  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3893  // offset
3894  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3895  // pred
3896  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3897  return true;
3898}
3899
3900/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3901/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3902/// when they refer to multiple MIOperands inside a single one.
3903bool ARMAsmParser::
3904cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3905                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3906  // Create a writeback register dummy placeholder.
3907  Inst.addOperand(MCOperand::CreateImm(0));
3908  // Rt
3909  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3910  // addr
3911  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3912  // offset
3913  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3914  // pred
3915  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3916  return true;
3917}
3918
3919/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3920/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3921/// when they refer to multiple MIOperands inside a single one.
3922bool ARMAsmParser::
3923cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3924                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3925  // Create a writeback register dummy placeholder.
3926  Inst.addOperand(MCOperand::CreateImm(0));
3927  // Rt
3928  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3929  // addr
3930  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3931  // offset
3932  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3933  // pred
3934  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3935  return true;
3936}
3937
3938/// cvtLdrdPre - Convert parsed operands to MCInst.
3939/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3940/// when they refer to multiple MIOperands inside a single one.
3941bool ARMAsmParser::
3942cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3943           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3944  // Rt, Rt2
3945  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3946  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3947  // Create a writeback register dummy placeholder.
3948  Inst.addOperand(MCOperand::CreateImm(0));
3949  // addr
3950  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3951  // pred
3952  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3953  return true;
3954}
3955
3956/// cvtStrdPre - Convert parsed operands to MCInst.
3957/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3958/// when they refer to multiple MIOperands inside a single one.
3959bool ARMAsmParser::
3960cvtStrdPre(MCInst &Inst, unsigned Opcode,
3961           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3962  // Create a writeback register dummy placeholder.
3963  Inst.addOperand(MCOperand::CreateImm(0));
3964  // Rt, Rt2
3965  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3966  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3967  // addr
3968  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3969  // pred
3970  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3971  return true;
3972}
3973
3974/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3975/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3976/// when they refer to multiple MIOperands inside a single one.
3977bool ARMAsmParser::
3978cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3979                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3980  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3981  // Create a writeback register dummy placeholder.
3982  Inst.addOperand(MCOperand::CreateImm(0));
3983  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3984  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3985  return true;
3986}
3987
3988/// cvtThumbMultiply - Convert parsed operands to MCInst.
3989/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3990/// when they refer to multiple MIOperands inside a single one.
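/// e.g. "muls r0, r1, r0", where the destination must match one of the source
/// registers (illustrative Thumb syntax).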
3991bool ARMAsmParser::
3992cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3993           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3994  // The second source operand must be the same register as the destination
3995  // operand.
3996  if (Operands.size() == 6 &&
3997      (((ARMOperand*)Operands[3])->getReg() !=
3998       ((ARMOperand*)Operands[5])->getReg()) &&
3999      (((ARMOperand*)Operands[3])->getReg() !=
4000       ((ARMOperand*)Operands[4])->getReg())) {
4001    Error(Operands[3]->getStartLoc(),
4002          "destination register must match source register");
4003    return false;
4004  }
4005  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4006  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4007  // If we have a three-operand form, make sure to set Rn to be the operand
4008  // that isn't the same as Rd.
4009  unsigned RegOp = 4;
4010  if (Operands.size() == 6 &&
4011      ((ARMOperand*)Operands[4])->getReg() ==
4012        ((ARMOperand*)Operands[3])->getReg())
4013    RegOp = 5;
4014  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4015  Inst.addOperand(Inst.getOperand(0));
4016  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4017
4018  return true;
4019}
4020
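/// cvtVLDwbFixed - Convert parsed operands to MCInst for a writeback VLD with
/// a fixed post-increment, e.g. "vld1.32 {d0, d1}, [r0]!" (illustrative).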
4021bool ARMAsmParser::
4022cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4023              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4024  // Vd
4025  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4026  // Create a writeback register dummy placeholder.
4027  Inst.addOperand(MCOperand::CreateImm(0));
4028  // Vn
4029  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4030  // pred
4031  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4032  return true;
4033}
4034
4035bool ARMAsmParser::
4036cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4037                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4038  // Vd
4039  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4040  // Create a writeback register dummy placeholder.
4041  Inst.addOperand(MCOperand::CreateImm(0));
4042  // Vn
4043  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4044  // Vm
4045  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4046  // pred
4047  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4048  return true;
4049}
4050
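/// cvtVSTwbFixed - Convert parsed operands to MCInst for a writeback VST with
/// a fixed post-increment, e.g. "vst1.32 {d0, d1}, [r0]!" (illustrative).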
4051bool ARMAsmParser::
4052cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4053              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4054  // Create a writeback register dummy placeholder.
4055  Inst.addOperand(MCOperand::CreateImm(0));
4056  // Vn
4057  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4058  // Vt
4059  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4060  // pred
4061  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4062  return true;
4063}
4064
4065bool ARMAsmParser::
4066cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4067                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4068  // Create a writeback register dummy placeholder.
4069  Inst.addOperand(MCOperand::CreateImm(0));
4070  // Vn
4071  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4072  // Vm
4073  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4074  // Vt
4075  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4076  // pred
4077  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4078  return true;
4079}
4080
4081/// Parse an ARM memory expression. Return false on success; otherwise emit a
4082/// diagnostic and return true. The first token must be a '[' when called.
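/// Handles the bracketed forms, e.g. "[r0]", "[r1, #4]", "[r2, r3, lsl #2]",
/// and the alignment form "[r0, :128]", each optionally followed by a '!'
/// writeback marker (illustrative).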
4083bool ARMAsmParser::
4084parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4085  SMLoc S, E;
4086  assert(Parser.getTok().is(AsmToken::LBrac) &&
4087         "Token is not a Left Bracket");
4088  S = Parser.getTok().getLoc();
4089  Parser.Lex(); // Eat left bracket token.
4090
4091  const AsmToken &BaseRegTok = Parser.getTok();
4092  int BaseRegNum = tryParseRegister();
4093  if (BaseRegNum == -1)
4094    return Error(BaseRegTok.getLoc(), "register expected");
4095
4096  // The next token must either be a comma or a closing bracket.
4097  const AsmToken &Tok = Parser.getTok();
4098  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4099    return Error(Tok.getLoc(), "malformed memory operand");
4100
4101  if (Tok.is(AsmToken::RBrac)) {
4102    E = Tok.getLoc();
4103    Parser.Lex(); // Eat right bracket token.
4104
4105    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4106                                             0, 0, false, S, E));
4107
4108    // If there's a pre-indexing writeback marker, '!', just add it as a token
4109    // operand. It's rather odd, but syntactically valid.
4110    if (Parser.getTok().is(AsmToken::Exclaim)) {
4111      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4112      Parser.Lex(); // Eat the '!'.
4113    }
4114
4115    return false;
4116  }
4117
4118  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4119  Parser.Lex(); // Eat the comma.
4120
4121  // If we have a ':', it's an alignment specifier.
4122  if (Parser.getTok().is(AsmToken::Colon)) {
4123    Parser.Lex(); // Eat the ':'.
4124    E = Parser.getTok().getLoc();
4125
4126    const MCExpr *Expr;
4127    if (getParser().ParseExpression(Expr))
4128     return true;
4129
4130    // The expression has to be a constant. Memory references with relocations
4131    // don't come through here, as they use the <label> forms of the relevant
4132    // instructions.
4133    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4134    if (!CE)
4135      return Error (E, "constant expression expected");
4136
4137    unsigned Align = 0;
4138    switch (CE->getValue()) {
4139    default:
4140      return Error(E,
4141                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4142    case 16:  Align = 2; break;
4143    case 32:  Align = 4; break;
4144    case 64:  Align = 8; break;
4145    case 128: Align = 16; break;
4146    case 256: Align = 32; break;
4147    }
4148
4149    // Now we should have the closing ']'
4150    E = Parser.getTok().getLoc();
4151    if (Parser.getTok().isNot(AsmToken::RBrac))
4152      return Error(E, "']' expected");
4153    Parser.Lex(); // Eat right bracket token.
4154
4155    // Don't worry about range checking the value here. That's handled by
4156    // the is*() predicates.
4157    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4158                                             ARM_AM::no_shift, 0, Align,
4159                                             false, S, E));
4160
4161    // If there's a pre-indexing writeback marker, '!', just add it as a token
4162    // operand.
4163    if (Parser.getTok().is(AsmToken::Exclaim)) {
4164      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4165      Parser.Lex(); // Eat the '!'.
4166    }
4167
4168    return false;
4169  }
4170
4171  // If we have a '#', it's an immediate offset, else assume it's a register
4172  // offset. Be friendly and also accept a plain integer (without a leading
4173  // hash) for gas compatibility.
4174  if (Parser.getTok().is(AsmToken::Hash) ||
4175      Parser.getTok().is(AsmToken::Dollar) ||
4176      Parser.getTok().is(AsmToken::Integer)) {
4177    if (Parser.getTok().isNot(AsmToken::Integer))
4178      Parser.Lex(); // Eat the '#'.
4179    E = Parser.getTok().getLoc();
4180
4181    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4182    const MCExpr *Offset;
4183    if (getParser().ParseExpression(Offset))
4184     return true;
4185
4186    // The expression has to be a constant. Memory references with relocations
4187    // don't come through here, as they use the <label> forms of the relevant
4188    // instructions.
4189    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4190    if (!CE)
4191      return Error (E, "constant expression expected");
4192
4193    // If the constant was #-0, represent it as INT32_MIN.
4194    int32_t Val = CE->getValue();
4195    if (isNegative && Val == 0)
4196      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4197
4198    // Now we should have the closing ']'
4199    E = Parser.getTok().getLoc();
4200    if (Parser.getTok().isNot(AsmToken::RBrac))
4201      return Error(E, "']' expected");
4202    Parser.Lex(); // Eat right bracket token.
4203
4204    // Don't worry about range checking the value here. That's handled by
4205    // the is*() predicates.
4206    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4207                                             ARM_AM::no_shift, 0, 0,
4208                                             false, S, E));
4209
4210    // If there's a pre-indexing writeback marker, '!', just add it as a token
4211    // operand.
4212    if (Parser.getTok().is(AsmToken::Exclaim)) {
4213      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4214      Parser.Lex(); // Eat the '!'.
4215    }
4216
4217    return false;
4218  }
4219
4220  // The register offset is optionally preceded by a '+' or '-'
4221  bool isNegative = false;
4222  if (Parser.getTok().is(AsmToken::Minus)) {
4223    isNegative = true;
4224    Parser.Lex(); // Eat the '-'.
4225  } else if (Parser.getTok().is(AsmToken::Plus)) {
4226    // Nothing to do.
4227    Parser.Lex(); // Eat the '+'.
4228  }
4229
4230  E = Parser.getTok().getLoc();
4231  int OffsetRegNum = tryParseRegister();
4232  if (OffsetRegNum == -1)
4233    return Error(E, "register expected");
4234
4235  // If there's a shift operator, handle it.
4236  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4237  unsigned ShiftImm = 0;
4238  if (Parser.getTok().is(AsmToken::Comma)) {
4239    Parser.Lex(); // Eat the ','.
4240    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4241      return true;
4242  }
4243
4244  // Now we should have the closing ']'
4245  E = Parser.getTok().getLoc();
4246  if (Parser.getTok().isNot(AsmToken::RBrac))
4247    return Error(E, "']' expected");
4248  Parser.Lex(); // Eat right bracket token.
4249
4250  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4251                                           ShiftType, ShiftImm, 0, isNegative,
4252                                           S, E));
4253
4254  // If there's a pre-indexing writeback marker, '!', just add it as a token
4255  // operand.
4256  if (Parser.getTok().is(AsmToken::Exclaim)) {
4257    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4258    Parser.Lex(); // Eat the '!'.
4259  }
4260
4261  return false;
4262}
4263
4264/// parseMemRegOffsetShift - one of these two:
4265///   ( lsl | lsr | asr | ror ) , # shift_amount
4266///   rrx
4267/// Return false if a shift was successfully parsed, otherwise return true.
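/// e.g. the "lsl #2" in "[r0, r1, lsl #2]" or the bare "rrx" in
/// "[r0, r1, rrx]" (illustrative).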
4268bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4269                                          unsigned &Amount) {
4270  SMLoc Loc = Parser.getTok().getLoc();
4271  const AsmToken &Tok = Parser.getTok();
4272  if (Tok.isNot(AsmToken::Identifier))
4273    return true;
4274  StringRef ShiftName = Tok.getString();
4275  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4276      ShiftName == "asl" || ShiftName == "ASL")
4277    St = ARM_AM::lsl;
4278  else if (ShiftName == "lsr" || ShiftName == "LSR")
4279    St = ARM_AM::lsr;
4280  else if (ShiftName == "asr" || ShiftName == "ASR")
4281    St = ARM_AM::asr;
4282  else if (ShiftName == "ror" || ShiftName == "ROR")
4283    St = ARM_AM::ror;
4284  else if (ShiftName == "rrx" || ShiftName == "RRX")
4285    St = ARM_AM::rrx;
4286  else
4287    return Error(Loc, "illegal shift operator");
4288  Parser.Lex(); // Eat shift type token.
4289
4290  // rrx stands alone.
4291  Amount = 0;
4292  if (St != ARM_AM::rrx) {
4293    Loc = Parser.getTok().getLoc();
4294    // A '#' and a shift amount.
4295    const AsmToken &HashTok = Parser.getTok();
4296    if (HashTok.isNot(AsmToken::Hash) &&
4297        HashTok.isNot(AsmToken::Dollar))
4298      return Error(HashTok.getLoc(), "'#' expected");
4299    Parser.Lex(); // Eat hash token.
4300
4301    const MCExpr *Expr;
4302    if (getParser().ParseExpression(Expr))
4303      return true;
4304    // Range check the immediate.
4305    // lsl, ror: 0 <= imm <= 31
4306    // lsr, asr: 0 <= imm <= 32
4307    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4308    if (!CE)
4309      return Error(Loc, "shift amount must be an immediate");
4310    int64_t Imm = CE->getValue();
4311    if (Imm < 0 ||
4312        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4313        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4314      return Error(Loc, "immediate shift value out of range");
4315    Amount = Imm;
4316  }
4317
4318  return false;
4319}
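// As a worked example for parseMemRegOffsetShift above: for the operand text
// "[r0, r1, lsl #3]" the shift parser sees "lsl" and "#3" and produces
// St == ARM_AM::lsl, Amount == 3. "lsl #32" is rejected ("immediate shift
// value out of range"), while "lsr #32" and "asr #32" are still accepted.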
4320
4321/// parseFPImm - A floating point immediate expression operand.
4322ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4323parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4324  // Anything that can accept a floating point constant as an operand
4325  // needs to go through here, as the regular ParseExpression is
4326  // integer only.
4327  //
4328  // This routine still creates a generic Immediate operand, containing
4329  // a bitcast of the 64-bit floating point value. The various operands
4330  // that accept floats can check whether the value is valid for them
4331  // via the standard is*() predicates.
4332
4333  SMLoc S = Parser.getTok().getLoc();
4334
4335  if (Parser.getTok().isNot(AsmToken::Hash) &&
4336      Parser.getTok().isNot(AsmToken::Dollar))
4337    return MatchOperand_NoMatch;
4338
4339  // Disambiguate the VMOV forms that can accept an FP immediate.
4340  // vmov.f32 <sreg>, #imm
4341  // vmov.f64 <dreg>, #imm
4342  // vmov.f32 <dreg>, #imm  @ vector f32x2
4343  // vmov.f32 <qreg>, #imm  @ vector f32x4
4344  //
4345  // There are also the NEON VMOV instructions which expect an
4346  // integer constant. Make sure we don't try to parse an FPImm
4347  // for these:
4348  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4349  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4350  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4351                           TyOp->getToken() != ".f64"))
4352    return MatchOperand_NoMatch;
4353
4354  Parser.Lex(); // Eat the '#'.
4355
4356  // Handle negation, as that still comes through as a separate token.
4357  bool isNegative = false;
4358  if (Parser.getTok().is(AsmToken::Minus)) {
4359    isNegative = true;
4360    Parser.Lex();
4361  }
4362  const AsmToken &Tok = Parser.getTok();
4363  SMLoc Loc = Tok.getLoc();
4364  if (Tok.is(AsmToken::Real)) {
4365    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4366    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4367    // If we had a '-' in front, toggle the sign bit.
4368    IntVal ^= (uint64_t)isNegative << 31;
4369    Parser.Lex(); // Eat the token.
4370    Operands.push_back(ARMOperand::CreateImm(
4371          MCConstantExpr::Create(IntVal, getContext()),
4372          S, Parser.getTok().getLoc()));
4373    return MatchOperand_Success;
4374  }
4375  // Also handle plain integers. Instructions which allow floating point
4376  // immediates also allow a raw encoded 8-bit value.
4377  if (Tok.is(AsmToken::Integer)) {
4378    int64_t Val = Tok.getIntVal();
4379    Parser.Lex(); // Eat the token.
4380    if (Val > 255 || Val < 0) {
4381      Error(Loc, "encoded floating point value out of range");
4382      return MatchOperand_ParseFail;
4383    }
4384    double RealVal = ARM_AM::getFPImmFloat(Val);
4385    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4386    Operands.push_back(ARMOperand::CreateImm(
4387        MCConstantExpr::Create(Val, getContext()), S,
4388        Parser.getTok().getLoc()));
4389    return MatchOperand_Success;
4390  }
4391
4392  Error(Loc, "invalid floating point immediate");
4393  return MatchOperand_ParseFail;
4394}
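// Concrete examples of the two paths in parseFPImm above:
//   vmov.f32 s0, #1.0   takes the AsmToken::Real path; the operand carries the
//                       IEEE-754 single-precision bit pattern 0x3F800000 (with
//                       the sign bit toggled if a leading '-' was seen).
//   vmov.f32 s0, #112   takes the Integer path; values in [0,255] are treated
//                       as an already-encoded 8-bit VFP immediate and expanded
//                       via ARM_AM::getFPImmFloat().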
4395
4396/// Parse an ARM instruction operand.  For now this parses the operand regardless
4397/// of the mnemonic.
4398bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4399                                StringRef Mnemonic) {
4400  SMLoc S, E;
4401
4402  // Check if the current operand has a custom associated parser, if so, try to
4403  // custom parse the operand, or fallback to the general approach.
4404  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4405  if (ResTy == MatchOperand_Success)
4406    return false;
4407  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4408  // there was a match, but an error occurred, in which case, just return that
4409  // the operand parsing failed.
4410  if (ResTy == MatchOperand_ParseFail)
4411    return true;
4412
4413  switch (getLexer().getKind()) {
4414  default:
4415    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4416    return true;
4417  case AsmToken::Identifier: {
4418    if (!tryParseRegisterWithWriteBack(Operands))
4419      return false;
4420    int Res = tryParseShiftRegister(Operands);
4421    if (Res == 0) // success
4422      return false;
4423    else if (Res == -1) // irrecoverable error
4424      return true;
4425    // If this is VMRS, check for the apsr_nzcv operand.
4426    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4427      S = Parser.getTok().getLoc();
4428      Parser.Lex();
4429      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4430      return false;
4431    }
4432
4433    // Fall through for the Identifier case that is not a register or a
4434    // special name.
4435  }
4436  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4437  case AsmToken::Integer: // things like 1f and 2b as branch targets
4438  case AsmToken::String:  // quoted label names.
4439  case AsmToken::Dot: {   // . as a branch target
4440    // This was not a register so parse other operands that start with an
4441    // identifier (like labels) as expressions and create them as immediates.
4442    const MCExpr *IdVal;
4443    S = Parser.getTok().getLoc();
4444    if (getParser().ParseExpression(IdVal))
4445      return true;
4446    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4447    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4448    return false;
4449  }
4450  case AsmToken::LBrac:
4451    return parseMemory(Operands);
4452  case AsmToken::LCurly:
4453    return parseRegisterList(Operands);
4454  case AsmToken::Dollar:
4455  case AsmToken::Hash: {
4456    // #42 -> immediate.
4457    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4458    S = Parser.getTok().getLoc();
4459    Parser.Lex();
4460    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4461    const MCExpr *ImmVal;
4462    if (getParser().ParseExpression(ImmVal))
4463      return true;
4464    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4465    if (CE) {
4466      int32_t Val = CE->getValue();
4467      if (isNegative && Val == 0)
4468        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4469    }
4470    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4471    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4472    return false;
4473  }
4474  case AsmToken::Colon: {
4475    // ":lower16:" and ":upper16:" expression prefixes
4476    // FIXME: Check it's an expression prefix,
4477    // e.g. (FOO - :lower16:BAR) isn't legal.
4478    ARMMCExpr::VariantKind RefKind;
4479    if (parsePrefix(RefKind))
4480      return true;
4481
4482    const MCExpr *SubExprVal;
4483    if (getParser().ParseExpression(SubExprVal))
4484      return true;
4485
4486    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4487                                                   getContext());
4488    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4489    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4490    return false;
4491  }
4492  }
4493}
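// To make the dispatch in parseOperand above concrete, typical operand
// spellings and the token kinds that reach them are, e.g.:
//   r0, sp, lr        -> Identifier (registers, shifted registers, labels)
//   #42 or $42        -> Hash/Dollar (immediates)
//   [r0, #4]          -> LBrac (memory operands, via parseMemory)
//   {r0-r3, lr}       -> LCurly (register lists, via parseRegisterList)
//   :lower16:sym      -> Colon (relocation prefixes, via parsePrefix)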
4494
4495// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4496//  :lower16: and :upper16:.
4497bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4498  RefKind = ARMMCExpr::VK_ARM_None;
4499
4500  // :lower16: and :upper16: modifiers
4501  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4502  Parser.Lex(); // Eat ':'
4503
4504  if (getLexer().isNot(AsmToken::Identifier)) {
4505    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4506    return true;
4507  }
4508
4509  StringRef IDVal = Parser.getTok().getIdentifier();
4510  if (IDVal == "lower16") {
4511    RefKind = ARMMCExpr::VK_ARM_LO16;
4512  } else if (IDVal == "upper16") {
4513    RefKind = ARMMCExpr::VK_ARM_HI16;
4514  } else {
4515    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4516    return true;
4517  }
4518  Parser.Lex();
4519
4520  if (getLexer().isNot(AsmToken::Colon)) {
4521    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4522    return true;
4523  }
4524  Parser.Lex(); // Eat the last ':'
4525  return false;
4526}
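// For example, parsePrefix above maps the ":lower16:" in
//   movw r0, :lower16:some_symbol
// to ARMMCExpr::VK_ARM_LO16 (and ":upper16:" to VK_ARM_HI16); the caller in
// parseOperand then wraps the expression that follows in an ARMMCExpr with
// that variant kind.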
4527
4528/// \brief Given a mnemonic, split out possible predication code and carry
4529/// setting letters to form a canonical mnemonic and flags.
4530//
4531// FIXME: Would be nice to autogen this.
4532// FIXME: This is a bit of a maze of special cases.
4533StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4534                                      unsigned &PredicationCode,
4535                                      bool &CarrySetting,
4536                                      unsigned &ProcessorIMod,
4537                                      StringRef &ITMask) {
4538  PredicationCode = ARMCC::AL;
4539  CarrySetting = false;
4540  ProcessorIMod = 0;
4541
4542  // Ignore some mnemonics we know aren't predicated forms.
4543  //
4544  // FIXME: Would be nice to autogen this.
4545  if ((Mnemonic == "movs" && isThumb()) ||
4546      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4547      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4548      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4549      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4550      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4551      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4552      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4553      Mnemonic == "fmuls")
4554    return Mnemonic;
4555
4556  // First, split out any predication code. Ignore mnemonics we know aren't
4557  // predicated but are carry-setting, and so weren't caught above.
4558  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4559      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4560      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4561      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4562    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4563      .Case("eq", ARMCC::EQ)
4564      .Case("ne", ARMCC::NE)
4565      .Case("hs", ARMCC::HS)
4566      .Case("cs", ARMCC::HS)
4567      .Case("lo", ARMCC::LO)
4568      .Case("cc", ARMCC::LO)
4569      .Case("mi", ARMCC::MI)
4570      .Case("pl", ARMCC::PL)
4571      .Case("vs", ARMCC::VS)
4572      .Case("vc", ARMCC::VC)
4573      .Case("hi", ARMCC::HI)
4574      .Case("ls", ARMCC::LS)
4575      .Case("ge", ARMCC::GE)
4576      .Case("lt", ARMCC::LT)
4577      .Case("gt", ARMCC::GT)
4578      .Case("le", ARMCC::LE)
4579      .Case("al", ARMCC::AL)
4580      .Default(~0U);
4581    if (CC != ~0U) {
4582      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4583      PredicationCode = CC;
4584    }
4585  }
4586
4587  // Next, determine if we have a carry setting bit. We explicitly ignore all
4588  // the instructions we know end in 's'.
4589  if (Mnemonic.endswith("s") &&
4590      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4591        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4592        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4593        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4594        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4595        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4596        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4597        Mnemonic == "fmuls" || Mnemonic == "fcmps" ||
4598        (Mnemonic == "movs" && isThumb()))) {
4599    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4600    CarrySetting = true;
4601  }
4602
4603  // The "cps" instruction can have a interrupt mode operand which is glued into
4604  // the mnemonic. Check if this is the case, split it and parse the imod op
4605  if (Mnemonic.startswith("cps")) {
4606    // Split out any imod code.
4607    unsigned IMod =
4608      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4609      .Case("ie", ARM_PROC::IE)
4610      .Case("id", ARM_PROC::ID)
4611      .Default(~0U);
4612    if (IMod != ~0U) {
4613      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4614      ProcessorIMod = IMod;
4615    }
4616  }
4617
4618  // The "it" instruction has the condition mask on the end of the mnemonic.
4619  if (Mnemonic.startswith("it")) {
4620    ITMask = Mnemonic.slice(2, Mnemonic.size());
4621    Mnemonic = Mnemonic.slice(0, 2);
4622  }
4623
4624  return Mnemonic;
4625}
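// A few worked examples of how splitMnemonic above decomposes a mnemonic:
//   "addseq" -> Mnemonic "add", PredicationCode ARMCC::EQ, CarrySetting true
//   "cpsie"  -> Mnemonic "cps", ProcessorIMod ARM_PROC::IE
//   "ittet"  -> Mnemonic "it",  ITMask "tet"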
4626
4627/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4628/// inclusion of carry set or predication code operands.
4629//
4630// FIXME: It would be nice to autogen this.
4631void ARMAsmParser::
4632getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4633                      bool &CanAcceptPredicationCode) {
4634  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4635      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4636      Mnemonic == "add" || Mnemonic == "adc" ||
4637      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4638      Mnemonic == "orr" || Mnemonic == "mvn" ||
4639      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4640      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4641      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4642                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4643                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4644    CanAcceptCarrySet = true;
4645  } else
4646    CanAcceptCarrySet = false;
4647
4648  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4649      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4650      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4651      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4652      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4653      (Mnemonic == "clrex" && !isThumb()) ||
4654      (Mnemonic == "nop" && isThumbOne()) ||
4655      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4656        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4657        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4658      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4659       !isThumb()) ||
4660      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4661    CanAcceptPredicationCode = false;
4662  } else
4663    CanAcceptPredicationCode = true;
4664
4665  if (isThumb()) {
4666    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4667        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4668      CanAcceptPredicationCode = false;
4669  }
4670}
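// For instance, "add" gets both CanAcceptCarrySet and CanAcceptPredicationCode
// set to true, so the caller always materializes cc_out and condition-code
// operands and lets the matcher pick the encoding; "cps" gets both set to
// false, so neither operand is added.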
4671
4672bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4673                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4674  // FIXME: This is all horribly hacky. We really need a better way to deal
4675  // with optional operands like this in the matcher table.
4676
4677  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4678  // another does not. Specifically, the MOVW instruction does not. So we
4679  // special case it here and remove the defaulted (non-setting) cc_out
4680  // operand if that's the instruction we're trying to match.
4681  //
4682  // We do this as post-processing of the explicit operands rather than just
4683  // conditionally adding the cc_out in the first place because we need
4684  // to check the type of the parsed immediate operand.
4685  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4686      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4687      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4688      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4689    return true;
4690
4691  // Register-register 'add' for thumb does not have a cc_out operand
4692  // when there are only two register operands.
4693  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4694      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4695      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4696      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4697    return true;
4698  // Register-register 'add' for thumb does not have a cc_out operand
4699  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4700  // have to check the immediate range here since Thumb2 has a variant
4701  // that can handle a different range and has a cc_out operand.
4702  if (((isThumb() && Mnemonic == "add") ||
4703       (isThumbTwo() && Mnemonic == "sub")) &&
4704      Operands.size() == 6 &&
4705      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4706      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4707      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4708      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4709      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4710       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4711    return true;
4712  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4713  // imm0_4095 variant. That's the least-preferred variant when
4714  // selecting via the generic "add" mnemonic, so to know that we
4715  // should remove the cc_out operand, we have to explicitly check that
4716  // it's not one of the other variants. Ugh.
4717  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4718      Operands.size() == 6 &&
4719      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4720      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4721      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4722    // Nest conditions rather than one big 'if' statement for readability.
4723    //
4724    // If either register is a high reg, it's either one of the SP
4725    // variants (handled above) or a 32-bit encoding, so we just
4726    // check against T3. If the second register is the PC, this is an
4727    // alternate form of ADR, which uses encoding T4, so check for that too.
4728    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4729         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4730        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4731        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4732      return false;
4733    // If both registers are low, we're in an IT block, and the immediate is
4734    // in range, we should use encoding T1 instead, which has a cc_out.
4735    if (inITBlock() &&
4736        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4737        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4738        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4739      return false;
4740
4741    // Otherwise, we use encoding T4, which does not have a cc_out
4742    // operand.
4743    return true;
4744  }
4745
4746  // The thumb2 multiply instruction doesn't have a CCOut register, so
4747  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4748  // use the 16-bit encoding or not.
4749  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4750      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4751      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4752      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4753      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4754      // If the registers aren't low regs, the destination reg isn't the
4755      // same as one of the source regs, or the cc_out operand is zero
4756      // outside of an IT block, we have to use the 32-bit encoding, so
4757      // remove the cc_out operand.
4758      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4759       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4760       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4761       !inITBlock() ||
4762       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4763        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4764        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4765        static_cast<ARMOperand*>(Operands[4])->getReg())))
4766    return true;
4767
4768  // Also check the 'mul' syntax variant that doesn't specify an explicit
4769  // destination register.
4770  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4771      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4772      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4773      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4774      // If the registers aren't low regs  or the cc_out operand is zero
4775      // outside of an IT block, we have to use the 32-bit encoding, so
4776      // remove the cc_out operand.
4777      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4778       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4779       !inITBlock()))
4780    return true;
4781
4782
4783
4784  // Register-register 'add/sub' for thumb does not have a cc_out operand
4785  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4786  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4787  // right, this will result in better diagnostics (which operand is off)
4788  // anyway.
4789  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4790      (Operands.size() == 5 || Operands.size() == 6) &&
4791      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4792      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4793      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4794    return true;
4795
4796  return false;
4797}
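// Example of the effect of shouldOmitCCOutOperand above: for the Thumb
// instruction "add r0, r1" the operand list is [token "add", cc_out, cond,
// r0, r1]; the two-register check fires and the defaulted cc_out is dropped
// so the instruction can match an encoding that has no cc_out operand.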
4798
4799static bool isDataTypeToken(StringRef Tok) {
4800  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4801    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4802    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4803    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4804    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4805    Tok == ".f" || Tok == ".d";
4806}
4807
4808// FIXME: This bit should probably be handled via an explicit match class
4809// in the .td files that matches the suffix instead of having it be
4810// a literal string token the way it is now.
4811static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4812  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4813}
4814
4815static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4816/// Parse an ARM instruction mnemonic followed by its operands.
4817bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4818                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4819  // Apply mnemonic aliases before doing anything else, as the destination
4820  // mnemonic may include suffixes and we want to handle them normally.
4821  // The generic tblgen'erated code does this later, at the start of
4822  // MatchInstructionImpl(), but that's too late for aliases that include
4823  // any sort of suffix.
4824  unsigned AvailableFeatures = getAvailableFeatures();
4825  applyMnemonicAliases(Name, AvailableFeatures);
4826
4827  // First check for the ARM-specific .req directive.
4828  if (Parser.getTok().is(AsmToken::Identifier) &&
4829      Parser.getTok().getIdentifier() == ".req") {
4830    parseDirectiveReq(Name, NameLoc);
4831    // We always return 'error' for this, as we're done with this
4832    // statement and don't need to match the instruction.
4833    return true;
4834  }
4835
4836  // Create the leading tokens for the mnemonic, split by '.' characters.
4837  size_t Start = 0, Next = Name.find('.');
4838  StringRef Mnemonic = Name.slice(Start, Next);
4839
4840  // Split out the predication code and carry setting flag from the mnemonic.
4841  unsigned PredicationCode;
4842  unsigned ProcessorIMod;
4843  bool CarrySetting;
4844  StringRef ITMask;
4845  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4846                           ProcessorIMod, ITMask);
4847
4848  // In Thumb1, only the branch (B) instruction can be predicated.
4849  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4850    Parser.EatToEndOfStatement();
4851    return Error(NameLoc, "conditional execution not supported in Thumb1");
4852  }
4853
4854  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4855
4856  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4857  // is the mask as it will be for the IT encoding if the conditional
4858  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4859  // where the conditional bit0 is zero, the instruction post-processing
4860  // will adjust the mask accordingly.
4861  if (Mnemonic == "it") {
4862    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4863    if (ITMask.size() > 3) {
4864      Parser.EatToEndOfStatement();
4865      return Error(Loc, "too many conditions on IT instruction");
4866    }
4867    unsigned Mask = 8;
4868    for (unsigned i = ITMask.size(); i != 0; --i) {
4869      char pos = ITMask[i - 1];
4870      if (pos != 't' && pos != 'e') {
4871        Parser.EatToEndOfStatement();
4872        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4873      }
4874      Mask >>= 1;
4875      if (ITMask[i - 1] == 't')
4876        Mask |= 8;
4877    }
4878    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4879  }
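  // For example, "ittet eq" arrives here with ITMask == "tet"; the loop above
  // computes Mask == 0b1011 (the trailing 1 marks the end of the block, and
  // the 't' positions are set assuming the condition's bit0 is 1). The
  // post-processing mentioned above adjusts the mask when the condition's
  // bit0 is 0.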
4880
4881  // FIXME: This is all a pretty gross hack. We should automatically handle
4882  // optional operands like this via tblgen.
4883
4884  // Next, add the CCOut and ConditionCode operands, if needed.
4885  //
4886  // For mnemonics which can ever incorporate a carry setting bit or predication
4887  // code, our matching model involves us always generating CCOut and
4888  // ConditionCode operands to match the mnemonic "as written" and then we let
4889  // the matcher deal with finding the right instruction or generating an
4890  // appropriate error.
4891  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4892  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4893
4894  // If we had a carry-set on an instruction that can't do that, issue an
4895  // error.
4896  if (!CanAcceptCarrySet && CarrySetting) {
4897    Parser.EatToEndOfStatement();
4898    return Error(NameLoc, "instruction '" + Mnemonic +
4899                 "' can not set flags, but 's' suffix specified");
4900  }
4901  // If we had a predication code on an instruction that can't do that, issue an
4902  // error.
4903  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4904    Parser.EatToEndOfStatement();
4905    return Error(NameLoc, "instruction '" + Mnemonic +
4906                 "' is not predicable, but condition code specified");
4907  }
4908
4909  // Add the carry setting operand, if necessary.
4910  if (CanAcceptCarrySet) {
4911    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4912    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4913                                               Loc));
4914  }
4915
4916  // Add the predication code operand, if necessary.
4917  if (CanAcceptPredicationCode) {
4918    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4919                                      CarrySetting);
4920    Operands.push_back(ARMOperand::CreateCondCode(
4921                         ARMCC::CondCodes(PredicationCode), Loc));
4922  }
4923
4924  // Add the processor imod operand, if necessary.
4925  if (ProcessorIMod) {
4926    Operands.push_back(ARMOperand::CreateImm(
4927          MCConstantExpr::Create(ProcessorIMod, getContext()),
4928                                 NameLoc, NameLoc));
4929  }
4930
4931  // Add the remaining tokens in the mnemonic.
4932  while (Next != StringRef::npos) {
4933    Start = Next;
4934    Next = Name.find('.', Start + 1);
4935    StringRef ExtraToken = Name.slice(Start, Next);
4936
4937    // Some NEON instructions have an optional datatype suffix that is
4938    // completely ignored. Check for that.
4939    if (isDataTypeToken(ExtraToken) &&
4940        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4941      continue;
4942
4943    if (ExtraToken != ".n") {
4944      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4945      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4946    }
4947  }
4948
4949  // Read the remaining operands.
4950  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4951    // Read the first operand.
4952    if (parseOperand(Operands, Mnemonic)) {
4953      Parser.EatToEndOfStatement();
4954      return true;
4955    }
4956
4957    while (getLexer().is(AsmToken::Comma)) {
4958      Parser.Lex();  // Eat the comma.
4959
4960      // Parse and remember the operand.
4961      if (parseOperand(Operands, Mnemonic)) {
4962        Parser.EatToEndOfStatement();
4963        return true;
4964      }
4965    }
4966  }
4967
4968  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4969    SMLoc Loc = getLexer().getLoc();
4970    Parser.EatToEndOfStatement();
4971    return Error(Loc, "unexpected token in argument list");
4972  }
4973
4974  Parser.Lex(); // Consume the EndOfStatement
4975
4976  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4977  // do and don't have a cc_out optional-def operand. With some spot-checks
4978  // of the operand list, we can figure out which variant we're trying to
4979  // parse and adjust accordingly before actually matching. We shouldn't ever
4980  // try to remove a cc_out operand that was explicitly set on the
4981  // mnemonic, of course (CarrySetting == true). Reason number #317 why the
4982  // table driven matcher doesn't fit well with the ARM instruction set.
4983  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4984    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4985    Operands.erase(Operands.begin() + 1);
4986    delete Op;
4987  }
4988
4989  // ARM mode 'blx' needs special handling, as the register operand version
4990  // is predicable, but the label operand version is not. So, we can't rely
4991  // on the Mnemonic based checking to correctly figure out when to put
4992  // a k_CondCode operand in the list. If we're trying to match the label
4993  // version, remove the k_CondCode operand here.
4994  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4995      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4996    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4997    Operands.erase(Operands.begin() + 1);
4998    delete Op;
4999  }
5000
5001  // The vector-compare-to-zero instructions have a literal token "#0" at
5002  // the end that comes to here as an immediate operand. Convert it to a
5003  // token to play nicely with the matcher.
5004  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5005      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5006      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5007    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5008    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5009    if (CE && CE->getValue() == 0) {
5010      Operands.erase(Operands.begin() + 5);
5011      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5012      delete Op;
5013    }
5014  }
5015  // VCMP{E} does the same thing, but with a different operand count.
5016  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5017      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5018    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5019    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5020    if (CE && CE->getValue() == 0) {
5021      Operands.erase(Operands.begin() + 4);
5022      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5023      delete Op;
5024    }
5025  }
5026  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5027  // end. Convert it to a token here. Take care not to convert those
5028  // that should hit the Thumb2 encoding.
5029  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5030      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5031      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5032      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5033    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5034    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5035    if (CE && CE->getValue() == 0 &&
5036        (isThumbOne() ||
5037         // The cc_out operand matches the IT block.
5038         ((inITBlock() != CarrySetting) &&
5039         // Neither register operand is a high register.
5040         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5041          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5042      Operands.erase(Operands.begin() + 5);
5043      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5044      delete Op;
5045    }
5046  }
5047
5048  return false;
5049}
5050
5051// Validate context-sensitive operand constraints.
5052
5053// Return 'true' if the register list contains non-low GPR registers (other
5054// than HiReg, which is explicitly allowed), 'false' otherwise. If Reg is in
5055// the register list, set 'containsReg' to true.
5056static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5057                                 unsigned HiReg, bool &containsReg) {
5058  containsReg = false;
5059  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5060    unsigned OpReg = Inst.getOperand(i).getReg();
5061    if (OpReg == Reg)
5062      containsReg = true;
5063    // Anything other than a low register isn't legal here.
5064    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5065      return true;
5066  }
5067  return false;
5068}
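// For example, the tPUSH validation below calls this with HiReg == ARM::LR:
// a Thumb1 list like {r4, r8} is rejected ("registers must be in range r0-r7
// or lr") because r8 is neither a low register nor LR, while {r4, lr} passes.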
5069
5070// Check if the specified register is in the register list of the inst,
5071// starting at the indicated operand number.
5072static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5073  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5074    unsigned OpReg = Inst.getOperand(i).getReg();
5075    if (OpReg == Reg)
5076      return true;
5077  }
5078  return false;
5079}
5080
5081// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5082// the ARMInsts array) instead. Getting that here requires awkward
5083// API changes, though. Better way?
5084namespace llvm {
5085extern const MCInstrDesc ARMInsts[];
5086}
5087static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5088  return ARMInsts[Opcode];
5089}
5090
5091// FIXME: We would really like to be able to tablegen'erate this.
5092bool ARMAsmParser::
5093validateInstruction(MCInst &Inst,
5094                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5095  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5096  SMLoc Loc = Operands[0]->getStartLoc();
5097  // Check the IT block state first.
5098  // NOTE: BKPT instruction has the interesting property of being
5099  // allowed in IT blocks, but not being predicable.  It just always
5100  // executes.
5101  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5102      Inst.getOpcode() != ARM::BKPT) {
5103    unsigned bit = 1;
5104    if (ITState.FirstCond)
5105      ITState.FirstCond = false;
5106    else
5107      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5108    // The instruction must be predicable.
5109    if (!MCID.isPredicable())
5110      return Error(Loc, "instructions in IT block must be predicable");
5111    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5112    unsigned ITCond = bit ? ITState.Cond :
5113      ARMCC::getOppositeCondition(ITState.Cond);
5114    if (Cond != ITCond) {
5115      // Find the condition code Operand to get its SMLoc information.
5116      SMLoc CondLoc;
5117      for (unsigned i = 1; i < Operands.size(); ++i)
5118        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5119          CondLoc = Operands[i]->getStartLoc();
5120      return Error(CondLoc, "incorrect condition in IT block; got '" +
5121                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5122                   "', but expected '" +
5123                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5124    }
5125  // Check for non-'al' condition codes outside of the IT block.
5126  } else if (isThumbTwo() && MCID.isPredicable() &&
5127             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5128             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5129             Inst.getOpcode() != ARM::t2B)
5130    return Error(Loc, "predicated instructions must be in IT block");
5131
5132  switch (Inst.getOpcode()) {
5133  case ARM::LDRD:
5134  case ARM::LDRD_PRE:
5135  case ARM::LDRD_POST:
5136  case ARM::LDREXD: {
5137    // Rt2 must be Rt + 1.
5138    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5139    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5140    if (Rt2 != Rt + 1)
5141      return Error(Operands[3]->getStartLoc(),
5142                   "destination operands must be sequential");
5143    return false;
5144  }
5145  case ARM::STRD: {
5146    // Rt2 must be Rt + 1.
5147    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5148    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5149    if (Rt2 != Rt + 1)
5150      return Error(Operands[3]->getStartLoc(),
5151                   "source operands must be sequential");
5152    return false;
5153  }
5154  case ARM::STRD_PRE:
5155  case ARM::STRD_POST:
5156  case ARM::STREXD: {
5157    // Rt2 must be Rt + 1.
5158    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5159    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5160    if (Rt2 != Rt + 1)
5161      return Error(Operands[3]->getStartLoc(),
5162                   "source operands must be sequential");
5163    return false;
5164  }
5165  case ARM::SBFX:
5166  case ARM::UBFX: {
5167    // width must be in range [1, 32-lsb]
5168    unsigned lsb = Inst.getOperand(2).getImm();
5169    unsigned widthm1 = Inst.getOperand(3).getImm();
5170    if (widthm1 >= 32 - lsb)
5171      return Error(Operands[5]->getStartLoc(),
5172                   "bitfield width must be in range [1,32-lsb]");
5173    return false;
5174  }
5175  case ARM::tLDMIA: {
5176    // If we're parsing Thumb2, the .w variant is available and handles
5177    // most cases that are normally illegal for a Thumb1 LDM
5178    // instruction. We'll make the transformation in processInstruction()
5179    // if necessary.
5180    //
5181    // Thumb LDM instructions are writeback iff the base register is not
5182    // in the register list.
5183    unsigned Rn = Inst.getOperand(0).getReg();
5184    bool hasWritebackToken =
5185      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5186       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5187    bool listContainsBase;
5188    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5189      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5190                   "registers must be in range r0-r7");
5191    // If we should have writeback, then there should be a '!' token.
5192    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5193      return Error(Operands[2]->getStartLoc(),
5194                   "writeback operator '!' expected");
5195    // If we should not have writeback, there must not be a '!'. This is
5196    // true even for the 32-bit wide encodings.
5197    if (listContainsBase && hasWritebackToken)
5198      return Error(Operands[3]->getStartLoc(),
5199                   "writeback operator '!' not allowed when base register "
5200                   "in register list");
5201
5202    break;
5203  }
5204  case ARM::t2LDMIA_UPD: {
5205    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5206      return Error(Operands[4]->getStartLoc(),
5207                   "writeback operator '!' not allowed when base register "
5208                   "in register list");
5209    break;
5210  }
5211  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5212  // so only issue a diagnostic for thumb1. The instructions will be
5213  // switched to the t2 encodings in processInstruction() if necessary.
5214  case ARM::tPOP: {
5215    bool listContainsBase;
5216    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5217        !isThumbTwo())
5218      return Error(Operands[2]->getStartLoc(),
5219                   "registers must be in range r0-r7 or pc");
5220    break;
5221  }
5222  case ARM::tPUSH: {
5223    bool listContainsBase;
5224    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5225        !isThumbTwo())
5226      return Error(Operands[2]->getStartLoc(),
5227                   "registers must be in range r0-r7 or lr");
5228    break;
5229  }
5230  case ARM::tSTMIA_UPD: {
5231    bool listContainsBase;
5232    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5233      return Error(Operands[4]->getStartLoc(),
5234                   "registers must be in range r0-r7");
5235    break;
5236  }
5237  }
5238
5239  return false;
5240}
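// As an example of the checks above: "ldrd r0, r1, [r2]" satisfies the
// Rt2 == Rt + 1 constraint, while "ldrd r0, r2, [r3]" is rejected with
// "destination operands must be sequential".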
5241
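// In the helpers below, 'Spacing' is the register-number stride between
// consecutive D registers in the NEON register list: 1 for lists such as
// {d0, d1, d2}, and 2 for the double-spaced lists such as {d0, d2, d4} that
// the 'q'-suffixed pseudo opcodes stand for.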
5242static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5243  switch(Opc) {
5244  default: llvm_unreachable("unexpected opcode!");
5245  // VST1LN
5246  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5247  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5248  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5249  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5250  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5251  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5252  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5253  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5254  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5255
5256  // VST2LN
5257  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5258  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5259  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5260  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5261  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5262
5263  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5264  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5265  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5266  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5267  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5268
5269  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5270  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5271  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5272  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5273  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5274
5275  // VST3LN
5276  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5277  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5278  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5279  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5280  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5281  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5282  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5283  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5284  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5285  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5286  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5287  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5288  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5289  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5290  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5291
5292  // VST3
5293  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5294  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5295  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5296  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5297  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5298  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5299  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5300  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5301  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5302  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5303  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5304  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5305  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5306  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5307  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5308  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5309  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5310  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5311
5312  // VST4LN
5313  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5314  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5315  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5316  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5317  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5318  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5319  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5320  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5321  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5322  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5323  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5324  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5325  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5326  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5327  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5328
5329  // VST4
5330  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5331  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5332  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5333  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5334  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5335  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5336  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5337  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5338  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5339  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5340  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5341  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5342  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5343  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5344  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5345  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5346  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5347  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5348  }
5349}
5350
5351static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5352  switch(Opc) {
5353  default: llvm_unreachable("unexpected opcode!");
5354  // VLD1LN
5355  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5356  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5357  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5358  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5359  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5360  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5361  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5362  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5363  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5364
5365  // VLD2LN
5366  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5367  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5368  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5369  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5370  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5371  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5372  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5373  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5374  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5375  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5376  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5377  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5378  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5379  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5380  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5381
5382  // VLD3DUP
5383  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5384  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5385  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5386  case ARM::VLD3DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5387  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5388  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5389  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5390  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5391  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5392  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5393  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5394  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5395  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5396  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5397  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5398  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5399  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5400  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5401
5402  // VLD3LN
5403  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5404  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5405  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5406  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5407  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5408  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5409  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5410  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5411  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5412  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5413  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5414  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5415  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5416  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5417  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5418
5419  // VLD3
5420  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5421  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5422  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5423  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5424  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5425  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5426  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5427  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5428  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5429  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5430  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5431  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5432  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5433  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5434  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5435  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5436  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5437  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5438
5439  // VLD4LN
5440  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5441  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5442  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5443  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5444  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5445  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5446  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5447  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5448  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5449  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5450  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5451  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5452  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5453  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5454  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5455
5456  // VLD4DUP
5457  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5458  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5459  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5460  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5461  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5462  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5463  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5464  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5465  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5466  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5467  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5468  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5469  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5470  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5471  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5472  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5473  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5474  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5475
5476  // VLD4
5477  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5478  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5479  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5480  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5481  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5482  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5483  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5484  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5485  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5486  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5487  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5488  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5489  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5490  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5491  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5492  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5493  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5494  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5495  }
5496}
5497
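    // Spacing, as set in the tables above, is the stride of the NEON register
    // list: 1 selects consecutive D registers (d0, d1, d2, ...) for the "d"
    // pseudos, while 2 selects the double-spaced list used by the "q" pseudos
    // (d0, d2, d4, ...). processInstruction() below turns it into the extra
    // register operands Reg, Reg + Spacing, Reg + Spacing * 2, and so on.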
5498bool ARMAsmParser::
5499processInstruction(MCInst &Inst,
5500                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5501  switch (Inst.getOpcode()) {
5502  // Aliases for alternate PC+imm syntax of LDR instructions.
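      // e.g. a load written with an explicit PC base such as
      // "ldr.w r0, [pc, #8]" is parsed via the *pcrel pseudo-opcodes and
      // simply remapped here to the corresponding literal (pci) forms.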
5503  case ARM::t2LDRpcrel:
5504    Inst.setOpcode(ARM::t2LDRpci);
5505    return true;
5506  case ARM::t2LDRBpcrel:
5507    Inst.setOpcode(ARM::t2LDRBpci);
5508    return true;
5509  case ARM::t2LDRHpcrel:
5510    Inst.setOpcode(ARM::t2LDRHpci);
5511    return true;
5512  case ARM::t2LDRSBpcrel:
5513    Inst.setOpcode(ARM::t2LDRSBpci);
5514    return true;
5515  case ARM::t2LDRSHpcrel:
5516    Inst.setOpcode(ARM::t2LDRSHpci);
5517    return true;
5518  // Handle NEON VST complex aliases.
5519  case ARM::VST1LNdWB_register_Asm_8:
5520  case ARM::VST1LNdWB_register_Asm_16:
5521  case ARM::VST1LNdWB_register_Asm_32: {
5522    MCInst TmpInst;
5523    // Shuffle the operands around so the lane index operand is in the
5524    // right place.
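        // As parsed, the pseudo's operands are (Vd, lane, Rn, alignment, Rm,
        // predicate); the real writeback instruction expects (Rn_wb, Rn,
        // alignment, Rm, Vd, lane, predicate), hence the re-ordering below.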
5525    unsigned Spacing;
5526    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5527    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5528    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5529    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5530    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5531    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5532    TmpInst.addOperand(Inst.getOperand(1)); // lane
5533    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5534    TmpInst.addOperand(Inst.getOperand(6));
5535    Inst = TmpInst;
5536    return true;
5537  }
5538
5539  case ARM::VST2LNdWB_register_Asm_8:
5540  case ARM::VST2LNdWB_register_Asm_16:
5541  case ARM::VST2LNdWB_register_Asm_32:
5542  case ARM::VST2LNqWB_register_Asm_16:
5543  case ARM::VST2LNqWB_register_Asm_32: {
5544    MCInst TmpInst;
5545    // Shuffle the operands around so the lane index operand is in the
5546    // right place.
5547    unsigned Spacing;
5548    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5549    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5550    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5551    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5552    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5553    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5554    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5555                                            Spacing));
5556    TmpInst.addOperand(Inst.getOperand(1)); // lane
5557    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5558    TmpInst.addOperand(Inst.getOperand(6));
5559    Inst = TmpInst;
5560    return true;
5561  }
5562
5563  case ARM::VST3LNdWB_register_Asm_8:
5564  case ARM::VST3LNdWB_register_Asm_16:
5565  case ARM::VST3LNdWB_register_Asm_32:
5566  case ARM::VST3LNqWB_register_Asm_16:
5567  case ARM::VST3LNqWB_register_Asm_32: {
5568    MCInst TmpInst;
5569    // Shuffle the operands around so the lane index operand is in the
5570    // right place.
5571    unsigned Spacing;
5572    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5573    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5574    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5575    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5576    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5577    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5578    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5579                                            Spacing));
5580    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5581                                            Spacing * 2));
5582    TmpInst.addOperand(Inst.getOperand(1)); // lane
5583    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5584    TmpInst.addOperand(Inst.getOperand(6));
5585    Inst = TmpInst;
5586    return true;
5587  }
5588
5589  case ARM::VST4LNdWB_register_Asm_8:
5590  case ARM::VST4LNdWB_register_Asm_16:
5591  case ARM::VST4LNdWB_register_Asm_32:
5592  case ARM::VST4LNqWB_register_Asm_16:
5593  case ARM::VST4LNqWB_register_Asm_32: {
5594    MCInst TmpInst;
5595    // Shuffle the operands around so the lane index operand is in the
5596    // right place.
5597    unsigned Spacing;
5598    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5599    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5600    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5601    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5602    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5603    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5604    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5605                                            Spacing));
5606    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5607                                            Spacing * 2));
5608    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5609                                            Spacing * 3));
5610    TmpInst.addOperand(Inst.getOperand(1)); // lane
5611    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5612    TmpInst.addOperand(Inst.getOperand(6));
5613    Inst = TmpInst;
5614    return true;
5615  }
5616
5617  case ARM::VST1LNdWB_fixed_Asm_8:
5618  case ARM::VST1LNdWB_fixed_Asm_16:
5619  case ARM::VST1LNdWB_fixed_Asm_32: {
5620    MCInst TmpInst;
5621    // Shuffle the operands around so the lane index operand is in the
5622    // right place.
5623    unsigned Spacing;
5624    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5625    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5626    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5627    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5628    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5629    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5630    TmpInst.addOperand(Inst.getOperand(1)); // lane
5631    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5632    TmpInst.addOperand(Inst.getOperand(5));
5633    Inst = TmpInst;
5634    return true;
5635  }
5636
5637  case ARM::VST2LNdWB_fixed_Asm_8:
5638  case ARM::VST2LNdWB_fixed_Asm_16:
5639  case ARM::VST2LNdWB_fixed_Asm_32:
5640  case ARM::VST2LNqWB_fixed_Asm_16:
5641  case ARM::VST2LNqWB_fixed_Asm_32: {
5642    MCInst TmpInst;
5643    // Shuffle the operands around so the lane index operand is in the
5644    // right place.
5645    unsigned Spacing;
5646    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5647    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5648    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5649    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5650    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5651    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5652    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5653                                            Spacing));
5654    TmpInst.addOperand(Inst.getOperand(1)); // lane
5655    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5656    TmpInst.addOperand(Inst.getOperand(5));
5657    Inst = TmpInst;
5658    return true;
5659  }
5660
5661  case ARM::VST3LNdWB_fixed_Asm_8:
5662  case ARM::VST3LNdWB_fixed_Asm_16:
5663  case ARM::VST3LNdWB_fixed_Asm_32:
5664  case ARM::VST3LNqWB_fixed_Asm_16:
5665  case ARM::VST3LNqWB_fixed_Asm_32: {
5666    MCInst TmpInst;
5667    // Shuffle the operands around so the lane index operand is in the
5668    // right place.
5669    unsigned Spacing;
5670    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5671    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5672    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5673    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5674    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5675    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5676    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5677                                            Spacing));
5678    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5679                                            Spacing * 2));
5680    TmpInst.addOperand(Inst.getOperand(1)); // lane
5681    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5682    TmpInst.addOperand(Inst.getOperand(5));
5683    Inst = TmpInst;
5684    return true;
5685  }
5686
5687  case ARM::VST4LNdWB_fixed_Asm_8:
5688  case ARM::VST4LNdWB_fixed_Asm_16:
5689  case ARM::VST4LNdWB_fixed_Asm_32:
5690  case ARM::VST4LNqWB_fixed_Asm_16:
5691  case ARM::VST4LNqWB_fixed_Asm_32: {
5692    MCInst TmpInst;
5693    // Shuffle the operands around so the lane index operand is in the
5694    // right place.
5695    unsigned Spacing;
5696    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5697    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5698    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5699    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5700    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5701    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5702    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5703                                            Spacing));
5704    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5705                                            Spacing * 2));
5706    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5707                                            Spacing * 3));
5708    TmpInst.addOperand(Inst.getOperand(1)); // lane
5709    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5710    TmpInst.addOperand(Inst.getOperand(5));
5711    Inst = TmpInst;
5712    return true;
5713  }
5714
5715  case ARM::VST1LNdAsm_8:
5716  case ARM::VST1LNdAsm_16:
5717  case ARM::VST1LNdAsm_32: {
5718    MCInst TmpInst;
5719    // Shuffle the operands around so the lane index operand is in the
5720    // right place.
5721    unsigned Spacing;
5722    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5723    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5724    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5725    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5726    TmpInst.addOperand(Inst.getOperand(1)); // lane
5727    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5728    TmpInst.addOperand(Inst.getOperand(5));
5729    Inst = TmpInst;
5730    return true;
5731  }
5732
5733  case ARM::VST2LNdAsm_8:
5734  case ARM::VST2LNdAsm_16:
5735  case ARM::VST2LNdAsm_32:
5736  case ARM::VST2LNqAsm_16:
5737  case ARM::VST2LNqAsm_32: {
5738    MCInst TmpInst;
5739    // Shuffle the operands around so the lane index operand is in the
5740    // right place.
5741    unsigned Spacing;
5742    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5743    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5744    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5745    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5746    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5747                                            Spacing));
5748    TmpInst.addOperand(Inst.getOperand(1)); // lane
5749    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5750    TmpInst.addOperand(Inst.getOperand(5));
5751    Inst = TmpInst;
5752    return true;
5753  }
5754
5755  case ARM::VST3LNdAsm_8:
5756  case ARM::VST3LNdAsm_16:
5757  case ARM::VST3LNdAsm_32:
5758  case ARM::VST3LNqAsm_16:
5759  case ARM::VST3LNqAsm_32: {
5760    MCInst TmpInst;
5761    // Shuffle the operands around so the lane index operand is in the
5762    // right place.
5763    unsigned Spacing;
5764    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5765    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5766    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5767    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5768    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5769                                            Spacing));
5770    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5771                                            Spacing * 2));
5772    TmpInst.addOperand(Inst.getOperand(1)); // lane
5773    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5774    TmpInst.addOperand(Inst.getOperand(5));
5775    Inst = TmpInst;
5776    return true;
5777  }
5778
5779  case ARM::VST4LNdAsm_8:
5780  case ARM::VST4LNdAsm_16:
5781  case ARM::VST4LNdAsm_32:
5782  case ARM::VST4LNqAsm_16:
5783  case ARM::VST4LNqAsm_32: {
5784    MCInst TmpInst;
5785    // Shuffle the operands around so the lane index operand is in the
5786    // right place.
5787    unsigned Spacing;
5788    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5789    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5790    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5791    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5792    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5793                                            Spacing));
5794    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5795                                            Spacing * 2));
5796    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5797                                            Spacing * 3));
5798    TmpInst.addOperand(Inst.getOperand(1)); // lane
5799    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5800    TmpInst.addOperand(Inst.getOperand(5));
5801    Inst = TmpInst;
5802    return true;
5803  }
5804
5805  // Handle NEON VLD complex aliases.
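      // These mirror the VST aliases above, except that the register list is a
      // destination (added first) and the real instructions carry a tied
      // source operand that repeats the destination list.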
5806  case ARM::VLD1LNdWB_register_Asm_8:
5807  case ARM::VLD1LNdWB_register_Asm_16:
5808  case ARM::VLD1LNdWB_register_Asm_32: {
5809    MCInst TmpInst;
5810    // Shuffle the operands around so the lane index operand is in the
5811    // right place.
5812    unsigned Spacing;
5813    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5814    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5815    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5816    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5817    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5818    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5819    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5820    TmpInst.addOperand(Inst.getOperand(1)); // lane
5821    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5822    TmpInst.addOperand(Inst.getOperand(6));
5823    Inst = TmpInst;
5824    return true;
5825  }
5826
5827  case ARM::VLD2LNdWB_register_Asm_8:
5828  case ARM::VLD2LNdWB_register_Asm_16:
5829  case ARM::VLD2LNdWB_register_Asm_32:
5830  case ARM::VLD2LNqWB_register_Asm_16:
5831  case ARM::VLD2LNqWB_register_Asm_32: {
5832    MCInst TmpInst;
5833    // Shuffle the operands around so the lane index operand is in the
5834    // right place.
5835    unsigned Spacing;
5836    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5837    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5838    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5839                                            Spacing));
5840    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5841    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5842    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5843    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5844    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5845    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5846                                            Spacing));
5847    TmpInst.addOperand(Inst.getOperand(1)); // lane
5848    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5849    TmpInst.addOperand(Inst.getOperand(6));
5850    Inst = TmpInst;
5851    return true;
5852  }
5853
5854  case ARM::VLD3LNdWB_register_Asm_8:
5855  case ARM::VLD3LNdWB_register_Asm_16:
5856  case ARM::VLD3LNdWB_register_Asm_32:
5857  case ARM::VLD3LNqWB_register_Asm_16:
5858  case ARM::VLD3LNqWB_register_Asm_32: {
5859    MCInst TmpInst;
5860    // Shuffle the operands around so the lane index operand is in the
5861    // right place.
5862    unsigned Spacing;
5863    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5864    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5865    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5866                                            Spacing));
5867    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5868                                            Spacing * 2));
5869    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5870    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5871    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5872    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5873    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5874    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5875                                            Spacing));
5876    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5877                                            Spacing * 2));
5878    TmpInst.addOperand(Inst.getOperand(1)); // lane
5879    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5880    TmpInst.addOperand(Inst.getOperand(6));
5881    Inst = TmpInst;
5882    return true;
5883  }
5884
5885  case ARM::VLD4LNdWB_register_Asm_8:
5886  case ARM::VLD4LNdWB_register_Asm_16:
5887  case ARM::VLD4LNdWB_register_Asm_32:
5888  case ARM::VLD4LNqWB_register_Asm_16:
5889  case ARM::VLD4LNqWB_register_Asm_32: {
5890    MCInst TmpInst;
5891    // Shuffle the operands around so the lane index operand is in the
5892    // right place.
5893    unsigned Spacing;
5894    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5895    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5896    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5897                                            Spacing));
5898    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5899                                            Spacing * 2));
5900    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5901                                            Spacing * 3));
5902    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5903    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5904    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5905    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5906    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5907    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5908                                            Spacing));
5909    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5910                                            Spacing * 2));
5911    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5912                                            Spacing * 3));
5913    TmpInst.addOperand(Inst.getOperand(1)); // lane
5914    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5915    TmpInst.addOperand(Inst.getOperand(6));
5916    Inst = TmpInst;
5917    return true;
5918  }
5919
5920  case ARM::VLD1LNdWB_fixed_Asm_8:
5921  case ARM::VLD1LNdWB_fixed_Asm_16:
5922  case ARM::VLD1LNdWB_fixed_Asm_32: {
5923    MCInst TmpInst;
5924    // Shuffle the operands around so the lane index operand is in the
5925    // right place.
5926    unsigned Spacing;
5927    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5928    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5929    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5930    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5931    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5932    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5933    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5934    TmpInst.addOperand(Inst.getOperand(1)); // lane
5935    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5936    TmpInst.addOperand(Inst.getOperand(5));
5937    Inst = TmpInst;
5938    return true;
5939  }
5940
5941  case ARM::VLD2LNdWB_fixed_Asm_8:
5942  case ARM::VLD2LNdWB_fixed_Asm_16:
5943  case ARM::VLD2LNdWB_fixed_Asm_32:
5944  case ARM::VLD2LNqWB_fixed_Asm_16:
5945  case ARM::VLD2LNqWB_fixed_Asm_32: {
5946    MCInst TmpInst;
5947    // Shuffle the operands around so the lane index operand is in the
5948    // right place.
5949    unsigned Spacing;
5950    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5951    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5952    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5953                                            Spacing));
5954    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5955    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5956    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5957    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5958    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5959    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5960                                            Spacing));
5961    TmpInst.addOperand(Inst.getOperand(1)); // lane
5962    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5963    TmpInst.addOperand(Inst.getOperand(5));
5964    Inst = TmpInst;
5965    return true;
5966  }
5967
5968  case ARM::VLD3LNdWB_fixed_Asm_8:
5969  case ARM::VLD3LNdWB_fixed_Asm_16:
5970  case ARM::VLD3LNdWB_fixed_Asm_32:
5971  case ARM::VLD3LNqWB_fixed_Asm_16:
5972  case ARM::VLD3LNqWB_fixed_Asm_32: {
5973    MCInst TmpInst;
5974    // Shuffle the operands around so the lane index operand is in the
5975    // right place.
5976    unsigned Spacing;
5977    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5978    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5979    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5980                                            Spacing));
5981    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5982                                            Spacing * 2));
5983    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5984    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5985    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5986    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5987    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5988    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5989                                            Spacing));
5990    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5991                                            Spacing * 2));
5992    TmpInst.addOperand(Inst.getOperand(1)); // lane
5993    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5994    TmpInst.addOperand(Inst.getOperand(5));
5995    Inst = TmpInst;
5996    return true;
5997  }
5998
5999  case ARM::VLD4LNdWB_fixed_Asm_8:
6000  case ARM::VLD4LNdWB_fixed_Asm_16:
6001  case ARM::VLD4LNdWB_fixed_Asm_32:
6002  case ARM::VLD4LNqWB_fixed_Asm_16:
6003  case ARM::VLD4LNqWB_fixed_Asm_32: {
6004    MCInst TmpInst;
6005    // Shuffle the operands around so the lane index operand is in the
6006    // right place.
6007    unsigned Spacing;
6008    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6009    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6010    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6011                                            Spacing));
6012    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6013                                            Spacing * 2));
6014    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6015                                            Spacing * 3));
6016    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6017    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6018    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6019    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6020    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6021    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6022                                            Spacing));
6023    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6024                                            Spacing * 2));
6025    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6026                                            Spacing * 3));
6027    TmpInst.addOperand(Inst.getOperand(1)); // lane
6028    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6029    TmpInst.addOperand(Inst.getOperand(5));
6030    Inst = TmpInst;
6031    return true;
6032  }
6033
6034  case ARM::VLD1LNdAsm_8:
6035  case ARM::VLD1LNdAsm_16:
6036  case ARM::VLD1LNdAsm_32: {
6037    MCInst TmpInst;
6038    // Shuffle the operands around so the lane index operand is in the
6039    // right place.
6040    unsigned Spacing;
6041    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6042    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6043    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6044    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6045    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6046    TmpInst.addOperand(Inst.getOperand(1)); // lane
6047    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6048    TmpInst.addOperand(Inst.getOperand(5));
6049    Inst = TmpInst;
6050    return true;
6051  }
6052
6053  case ARM::VLD2LNdAsm_8:
6054  case ARM::VLD2LNdAsm_16:
6055  case ARM::VLD2LNdAsm_32:
6056  case ARM::VLD2LNqAsm_16:
6057  case ARM::VLD2LNqAsm_32: {
6058    MCInst TmpInst;
6059    // Shuffle the operands around so the lane index operand is in the
6060    // right place.
6061    unsigned Spacing;
6062    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6063    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6064    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6065                                            Spacing));
6066    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6067    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6068    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6069    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6070                                            Spacing));
6071    TmpInst.addOperand(Inst.getOperand(1)); // lane
6072    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6073    TmpInst.addOperand(Inst.getOperand(5));
6074    Inst = TmpInst;
6075    return true;
6076  }
6077
6078  case ARM::VLD3LNdAsm_8:
6079  case ARM::VLD3LNdAsm_16:
6080  case ARM::VLD3LNdAsm_32:
6081  case ARM::VLD3LNqAsm_16:
6082  case ARM::VLD3LNqAsm_32: {
6083    MCInst TmpInst;
6084    // Shuffle the operands around so the lane index operand is in the
6085    // right place.
6086    unsigned Spacing;
6087    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6088    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6089    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6090                                            Spacing));
6091    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6092                                            Spacing * 2));
6093    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6094    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6095    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6096    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6097                                            Spacing));
6098    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6099                                            Spacing * 2));
6100    TmpInst.addOperand(Inst.getOperand(1)); // lane
6101    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6102    TmpInst.addOperand(Inst.getOperand(5));
6103    Inst = TmpInst;
6104    return true;
6105  }
6106
6107  case ARM::VLD4LNdAsm_8:
6108  case ARM::VLD4LNdAsm_16:
6109  case ARM::VLD4LNdAsm_32:
6110  case ARM::VLD4LNqAsm_16:
6111  case ARM::VLD4LNqAsm_32: {
6112    MCInst TmpInst;
6113    // Shuffle the operands around so the lane index operand is in the
6114    // right place.
6115    unsigned Spacing;
6116    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6117    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6118    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6119                                            Spacing));
6120    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6121                                            Spacing * 2));
6122    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6123                                            Spacing * 3));
6124    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6125    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6126    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6127    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6128                                            Spacing));
6129    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6130                                            Spacing * 2));
6131    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6132                                            Spacing * 3));
6133    TmpInst.addOperand(Inst.getOperand(1)); // lane
6134    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6135    TmpInst.addOperand(Inst.getOperand(5));
6136    Inst = TmpInst;
6137    return true;
6138  }
6139
6140  // VLD3DUP single 3-element structure to all lanes instructions.
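      // e.g. "vld3.8 {d0[], d1[], d2[]}, [r4]". Only the first list register
      // survives parsing; d1 and d2 are reconstructed from Spacing below.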
6141  case ARM::VLD3DUPdAsm_8:
6142  case ARM::VLD3DUPdAsm_16:
6143  case ARM::VLD3DUPdAsm_32:
6144  case ARM::VLD3DUPqAsm_8:
6145  case ARM::VLD3DUPqAsm_16:
6146  case ARM::VLD3DUPqAsm_32: {
6147    MCInst TmpInst;
6148    unsigned Spacing;
6149    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6150    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6151    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6152                                            Spacing));
6153    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6154                                            Spacing * 2));
6155    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6156    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6157    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6158    TmpInst.addOperand(Inst.getOperand(4));
6159    Inst = TmpInst;
6160    return true;
6161  }
6162
6163  case ARM::VLD3DUPdWB_fixed_Asm_8:
6164  case ARM::VLD3DUPdWB_fixed_Asm_16:
6165  case ARM::VLD3DUPdWB_fixed_Asm_32:
6166  case ARM::VLD3DUPqWB_fixed_Asm_8:
6167  case ARM::VLD3DUPqWB_fixed_Asm_16:
6168  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6169    MCInst TmpInst;
6170    unsigned Spacing;
6171    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6172    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6173    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6174                                            Spacing));
6175    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6176                                            Spacing * 2));
6177    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6178    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6179    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6180    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6181    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6182    TmpInst.addOperand(Inst.getOperand(4));
6183    Inst = TmpInst;
6184    return true;
6185  }
6186
6187  case ARM::VLD3DUPdWB_register_Asm_8:
6188  case ARM::VLD3DUPdWB_register_Asm_16:
6189  case ARM::VLD3DUPdWB_register_Asm_32:
6190  case ARM::VLD3DUPqWB_register_Asm_8:
6191  case ARM::VLD3DUPqWB_register_Asm_16:
6192  case ARM::VLD3DUPqWB_register_Asm_32: {
6193    MCInst TmpInst;
6194    unsigned Spacing;
6195    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6196    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6197    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6198                                            Spacing));
6199    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6200                                            Spacing * 2));
6201    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6202    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6203    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6204    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6205    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6206    TmpInst.addOperand(Inst.getOperand(5));
6207    Inst = TmpInst;
6208    return true;
6209  }
6210
6211  // VLD3 multiple 3-element structure instructions.
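      // e.g. "vld3.8 {d0, d1, d2}, [r4]", or the double-spaced {d0, d2, d4}
      // form for the q pseudos; the extra list registers again come from
      // Spacing.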
6212  case ARM::VLD3dAsm_8:
6213  case ARM::VLD3dAsm_16:
6214  case ARM::VLD3dAsm_32:
6215  case ARM::VLD3qAsm_8:
6216  case ARM::VLD3qAsm_16:
6217  case ARM::VLD3qAsm_32: {
6218    MCInst TmpInst;
6219    unsigned Spacing;
6220    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6221    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6222    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6223                                            Spacing));
6224    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6225                                            Spacing * 2));
6226    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6227    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6228    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6229    TmpInst.addOperand(Inst.getOperand(4));
6230    Inst = TmpInst;
6231    return true;
6232  }
6233
6234  case ARM::VLD3dWB_fixed_Asm_8:
6235  case ARM::VLD3dWB_fixed_Asm_16:
6236  case ARM::VLD3dWB_fixed_Asm_32:
6237  case ARM::VLD3qWB_fixed_Asm_8:
6238  case ARM::VLD3qWB_fixed_Asm_16:
6239  case ARM::VLD3qWB_fixed_Asm_32: {
6240    MCInst TmpInst;
6241    unsigned Spacing;
6242    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6243    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6244    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6245                                            Spacing));
6246    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6247                                            Spacing * 2));
6248    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6249    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6250    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6251    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6252    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6253    TmpInst.addOperand(Inst.getOperand(4));
6254    Inst = TmpInst;
6255    return true;
6256  }
6257
6258  case ARM::VLD3dWB_register_Asm_8:
6259  case ARM::VLD3dWB_register_Asm_16:
6260  case ARM::VLD3dWB_register_Asm_32:
6261  case ARM::VLD3qWB_register_Asm_8:
6262  case ARM::VLD3qWB_register_Asm_16:
6263  case ARM::VLD3qWB_register_Asm_32: {
6264    MCInst TmpInst;
6265    unsigned Spacing;
6266    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6267    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6268    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6269                                            Spacing));
6270    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6271                                            Spacing * 2));
6272    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6273    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6274    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6275    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6276    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6277    TmpInst.addOperand(Inst.getOperand(5));
6278    Inst = TmpInst;
6279    return true;
6280  }
6281
6282  // VLD4DUP single 4-element structure to all lanes instructions.
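      // e.g. "vld4.8 {d0[], d1[], d2[], d3[]}, [r4]".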
6283  case ARM::VLD4DUPdAsm_8:
6284  case ARM::VLD4DUPdAsm_16:
6285  case ARM::VLD4DUPdAsm_32:
6286  case ARM::VLD4DUPqAsm_8:
6287  case ARM::VLD4DUPqAsm_16:
6288  case ARM::VLD4DUPqAsm_32: {
6289    MCInst TmpInst;
6290    unsigned Spacing;
6291    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6292    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6293    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6294                                            Spacing));
6295    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6296                                            Spacing * 2));
6297    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6298                                            Spacing * 3));
6299    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6300    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6301    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6302    TmpInst.addOperand(Inst.getOperand(4));
6303    Inst = TmpInst;
6304    return true;
6305  }
6306
6307  case ARM::VLD4DUPdWB_fixed_Asm_8:
6308  case ARM::VLD4DUPdWB_fixed_Asm_16:
6309  case ARM::VLD4DUPdWB_fixed_Asm_32:
6310  case ARM::VLD4DUPqWB_fixed_Asm_8:
6311  case ARM::VLD4DUPqWB_fixed_Asm_16:
6312  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6313    MCInst TmpInst;
6314    unsigned Spacing;
6315    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6316    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6317    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6318                                            Spacing));
6319    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6320                                            Spacing * 2));
6321    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6322                                            Spacing * 3));
6323    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6324    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6325    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6326    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6327    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6328    TmpInst.addOperand(Inst.getOperand(4));
6329    Inst = TmpInst;
6330    return true;
6331  }
6332
6333  case ARM::VLD4DUPdWB_register_Asm_8:
6334  case ARM::VLD4DUPdWB_register_Asm_16:
6335  case ARM::VLD4DUPdWB_register_Asm_32:
6336  case ARM::VLD4DUPqWB_register_Asm_8:
6337  case ARM::VLD4DUPqWB_register_Asm_16:
6338  case ARM::VLD4DUPqWB_register_Asm_32: {
6339    MCInst TmpInst;
6340    unsigned Spacing;
6341    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6342    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6343    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6344                                            Spacing));
6345    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6346                                            Spacing * 2));
6347    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6348                                            Spacing * 3));
6349    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6350    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6351    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6352    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6353    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6354    TmpInst.addOperand(Inst.getOperand(5));
6355    Inst = TmpInst;
6356    return true;
6357  }
6358
6359  // VLD4 multiple 4-element structure instructions.
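      // e.g. "vld4.8 {d0, d1, d2, d3}, [r4]".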
6360  case ARM::VLD4dAsm_8:
6361  case ARM::VLD4dAsm_16:
6362  case ARM::VLD4dAsm_32:
6363  case ARM::VLD4qAsm_8:
6364  case ARM::VLD4qAsm_16:
6365  case ARM::VLD4qAsm_32: {
6366    MCInst TmpInst;
6367    unsigned Spacing;
6368    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6369    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6370    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6371                                            Spacing));
6372    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6373                                            Spacing * 2));
6374    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6375                                            Spacing * 3));
6376    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6377    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6378    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6379    TmpInst.addOperand(Inst.getOperand(4));
6380    Inst = TmpInst;
6381    return true;
6382  }
6383
6384  case ARM::VLD4dWB_fixed_Asm_8:
6385  case ARM::VLD4dWB_fixed_Asm_16:
6386  case ARM::VLD4dWB_fixed_Asm_32:
6387  case ARM::VLD4qWB_fixed_Asm_8:
6388  case ARM::VLD4qWB_fixed_Asm_16:
6389  case ARM::VLD4qWB_fixed_Asm_32: {
6390    MCInst TmpInst;
6391    unsigned Spacing;
6392    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6393    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6394    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6395                                            Spacing));
6396    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6397                                            Spacing * 2));
6398    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6399                                            Spacing * 3));
6400    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6401    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6402    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6403    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6404    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6405    TmpInst.addOperand(Inst.getOperand(4));
6406    Inst = TmpInst;
6407    return true;
6408  }
6409
6410  case ARM::VLD4dWB_register_Asm_8:
6411  case ARM::VLD4dWB_register_Asm_16:
6412  case ARM::VLD4dWB_register_Asm_32:
6413  case ARM::VLD4qWB_register_Asm_8:
6414  case ARM::VLD4qWB_register_Asm_16:
6415  case ARM::VLD4qWB_register_Asm_32: {
6416    MCInst TmpInst;
6417    unsigned Spacing;
6418    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6419    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6420    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6421                                            Spacing));
6422    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6423                                            Spacing * 2));
6424    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6425                                            Spacing * 3));
6426    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6427    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6428    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6429    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6430    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6431    TmpInst.addOperand(Inst.getOperand(5));
6432    Inst = TmpInst;
6433    return true;
6434  }
6435
6436  // VST3 multiple 3-element structure instructions.
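      // e.g. "vst3.8 {d0, d1, d2}, [r4]"; same expansion as VLD3, but the
      // register list follows the address operands.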
6437  case ARM::VST3dAsm_8:
6438  case ARM::VST3dAsm_16:
6439  case ARM::VST3dAsm_32:
6440  case ARM::VST3qAsm_8:
6441  case ARM::VST3qAsm_16:
6442  case ARM::VST3qAsm_32: {
6443    MCInst TmpInst;
6444    unsigned Spacing;
6445    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6446    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6447    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6448    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6449    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6450                                            Spacing));
6451    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6452                                            Spacing * 2));
6453    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6454    TmpInst.addOperand(Inst.getOperand(4));
6455    Inst = TmpInst;
6456    return true;
6457  }
6458
6459  case ARM::VST3dWB_fixed_Asm_8:
6460  case ARM::VST3dWB_fixed_Asm_16:
6461  case ARM::VST3dWB_fixed_Asm_32:
6462  case ARM::VST3qWB_fixed_Asm_8:
6463  case ARM::VST3qWB_fixed_Asm_16:
6464  case ARM::VST3qWB_fixed_Asm_32: {
6465    MCInst TmpInst;
6466    unsigned Spacing;
6467    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6468    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6469    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6470    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6471    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6472    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6473    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6474                                            Spacing));
6475    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6476                                            Spacing * 2));
6477    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6478    TmpInst.addOperand(Inst.getOperand(4));
6479    Inst = TmpInst;
6480    return true;
6481  }
6482
6483  case ARM::VST3dWB_register_Asm_8:
6484  case ARM::VST3dWB_register_Asm_16:
6485  case ARM::VST3dWB_register_Asm_32:
6486  case ARM::VST3qWB_register_Asm_8:
6487  case ARM::VST3qWB_register_Asm_16:
6488  case ARM::VST3qWB_register_Asm_32: {
6489    MCInst TmpInst;
6490    unsigned Spacing;
6491    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6492    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6493    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6494    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6495    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6496    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6497    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6498                                            Spacing));
6499    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6500                                            Spacing * 2));
6501    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6502    TmpInst.addOperand(Inst.getOperand(5));
6503    Inst = TmpInst;
6504    return true;
6505  }
6506
6507  // VST4 multiple 4-element structure instructions.
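      // e.g. "vst4.8 {d0, d1, d2, d3}, [r4]".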
6508  case ARM::VST4dAsm_8:
6509  case ARM::VST4dAsm_16:
6510  case ARM::VST4dAsm_32:
6511  case ARM::VST4qAsm_8:
6512  case ARM::VST4qAsm_16:
6513  case ARM::VST4qAsm_32: {
6514    MCInst TmpInst;
6515    unsigned Spacing;
6516    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6517    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6518    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6519    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6520    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6521                                            Spacing));
6522    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6523                                            Spacing * 2));
6524    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6525                                            Spacing * 3));
6526    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6527    TmpInst.addOperand(Inst.getOperand(4));
6528    Inst = TmpInst;
6529    return true;
6530  }
6531
6532  case ARM::VST4dWB_fixed_Asm_8:
6533  case ARM::VST4dWB_fixed_Asm_16:
6534  case ARM::VST4dWB_fixed_Asm_32:
6535  case ARM::VST4qWB_fixed_Asm_8:
6536  case ARM::VST4qWB_fixed_Asm_16:
6537  case ARM::VST4qWB_fixed_Asm_32: {
6538    MCInst TmpInst;
6539    unsigned Spacing;
6540    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6541    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6542    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6543    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6544    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6545    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6546    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6547                                            Spacing));
6548    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6549                                            Spacing * 2));
6550    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6551                                            Spacing * 3));
6552    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6553    TmpInst.addOperand(Inst.getOperand(4));
6554    Inst = TmpInst;
6555    return true;
6556  }
6557
6558  case ARM::VST4dWB_register_Asm_8:
6559  case ARM::VST4dWB_register_Asm_16:
6560  case ARM::VST4dWB_register_Asm_32:
6561  case ARM::VST4qWB_register_Asm_8:
6562  case ARM::VST4qWB_register_Asm_16:
6563  case ARM::VST4qWB_register_Asm_32: {
6564    MCInst TmpInst;
6565    unsigned Spacing;
6566    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6567    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6568    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6569    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6570    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6571    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6572    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6573                                            Spacing));
6574    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6575                                            Spacing * 2));
6576    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6577                                            Spacing * 3));
6578    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6579    TmpInst.addOperand(Inst.getOperand(5));
6580    Inst = TmpInst;
6581    return true;
6582  }
6583
6584  // Handle the Thumb2 mode MOV complex aliases.
6585  case ARM::t2MOVsr:
6586  case ARM::t2MOVSsr: {
6587    // Which instruction to expand to depends on the CCOut operand and,
6588    // when the register operands are low registers, on whether we're
6589    // inside an IT block.
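        // e.g. "mov r1, r2, lsl r3" is rewritten as the matching shift
        // instruction (t2LSLrr, or the narrow tLSLrr when isNarrow holds).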
6590    bool isNarrow = false;
6591    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6592        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6593        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6594        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6595        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6596      isNarrow = true;
6597    MCInst TmpInst;
6598    unsigned newOpc;
6599    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6600    default: llvm_unreachable("unexpected opcode!");
6601    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6602    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6603    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6604    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6605    }
6606    TmpInst.setOpcode(newOpc);
6607    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6608    if (isNarrow)
6609      TmpInst.addOperand(MCOperand::CreateReg(
6610          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6611    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6612    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6613    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6614    TmpInst.addOperand(Inst.getOperand(5));
6615    if (!isNarrow)
6616      TmpInst.addOperand(MCOperand::CreateReg(
6617          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6618    Inst = TmpInst;
6619    return true;
6620  }
6621  case ARM::t2MOVsi:
6622  case ARM::t2MOVSsi: {
6623    // Which instruction to expand to depends on the CCOut operand and,
6624    // when the register operands are low registers, on whether we're
6625    // inside an IT block.
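        // e.g. "mov r1, r2, lsl #4" becomes t2LSLri (or tLSLri when narrow),
        // "mov r1, r2, rrx" becomes t2RRX, and a shift amount of 32 is
        // encoded as 0.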
6626    bool isNarrow = false;
6627    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6628        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6629        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6630      isNarrow = true;
6631    MCInst TmpInst;
6632    unsigned newOpc;
6633    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6634    default: llvm_unreachable("unexpected opcode!");
6635    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6636    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6637    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6638    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6639    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6640    }
6641    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6642    if (Amount == 32) Amount = 0;
6643    TmpInst.setOpcode(newOpc);
6644    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6645    if (isNarrow)
6646      TmpInst.addOperand(MCOperand::CreateReg(
6647          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6648    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6649    if (newOpc != ARM::t2RRX)
6650      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6651    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6652    TmpInst.addOperand(Inst.getOperand(4));
6653    if (!isNarrow)
6654      TmpInst.addOperand(MCOperand::CreateReg(
6655          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6656    Inst = TmpInst;
6657    return true;
6658  }
6659  // Handle the ARM mode MOV complex aliases.
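      // In ARM mode the rewrite goes the other way: "asr r0, r1, r2" is really
      // "mov r0, r1, asr r2", so the shift mnemonics become MOVsr/MOVsi with a
      // shifter operand.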
6660  case ARM::ASRr:
6661  case ARM::LSRr:
6662  case ARM::LSLr:
6663  case ARM::RORr: {
6664    ARM_AM::ShiftOpc ShiftTy;
6665    switch(Inst.getOpcode()) {
6666    default: llvm_unreachable("unexpected opcode!");
6667    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6668    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6669    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6670    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6671    }
6672    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6673    MCInst TmpInst;
6674    TmpInst.setOpcode(ARM::MOVsr);
6675    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6676    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6677    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6678    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6679    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6680    TmpInst.addOperand(Inst.getOperand(4));
6681    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6682    Inst = TmpInst;
6683    return true;
6684  }
6685  case ARM::ASRi:
6686  case ARM::LSRi:
6687  case ARM::LSLi:
6688  case ARM::RORi: {
6689    ARM_AM::ShiftOpc ShiftTy;
6690    switch(Inst.getOpcode()) {
6691    default: llvm_unreachable("unexpected opcode!");
6692    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6693    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6694    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6695    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6696    }
6697    // A shift by zero is a plain MOVr, not a MOVsi.
6698    unsigned Amt = Inst.getOperand(2).getImm();
6699    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6700    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6701    MCInst TmpInst;
6702    TmpInst.setOpcode(Opc);
6703    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6704    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6705    if (Opc == ARM::MOVsi)
6706      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6707    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6708    TmpInst.addOperand(Inst.getOperand(4));
6709    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6710    Inst = TmpInst;
6711    return true;
6712  }
6713  case ARM::RRXi: {
6714    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6715    MCInst TmpInst;
6716    TmpInst.setOpcode(ARM::MOVsi);
6717    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6718    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6719    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6720    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6721    TmpInst.addOperand(Inst.getOperand(3));
6722    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6723    Inst = TmpInst;
6724    return true;
6725  }
6726  case ARM::t2LDMIA_UPD: {
6727    // If this is a load of a single register, then we should use
6728    // a post-indexed LDR instruction instead, per the ARM ARM.
6729    if (Inst.getNumOperands() != 5)
6730      return false;
6731    MCInst TmpInst;
6732    TmpInst.setOpcode(ARM::t2LDR_POST);
6733    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6734    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6735    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6736    TmpInst.addOperand(MCOperand::CreateImm(4));
6737    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6738    TmpInst.addOperand(Inst.getOperand(3));
6739    Inst = TmpInst;
6740    return true;
6741  }
6742  case ARM::t2STMDB_UPD: {
6743    // If this is a store of a single register, then we should use
6744    // a pre-indexed STR instruction instead, per the ARM ARM.
6745    if (Inst.getNumOperands() != 5)
6746      return false;
6747    MCInst TmpInst;
6748    TmpInst.setOpcode(ARM::t2STR_PRE);
6749    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6750    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6751    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6752    TmpInst.addOperand(MCOperand::CreateImm(-4));
6753    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6754    TmpInst.addOperand(Inst.getOperand(3));
6755    Inst = TmpInst;
6756    return true;
6757  }
6758  case ARM::LDMIA_UPD:
6759    // If this is a load of a single register via a 'pop', then we should use
6760    // a post-indexed LDR instruction instead, per the ARM ARM.
6761    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6762        Inst.getNumOperands() == 5) {
6763      MCInst TmpInst;
6764      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6765      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6766      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6767      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6768      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6769      TmpInst.addOperand(MCOperand::CreateImm(4));
6770      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6771      TmpInst.addOperand(Inst.getOperand(3));
6772      Inst = TmpInst;
6773      return true;
6774    }
6775    break;
6776  case ARM::STMDB_UPD:
6777    // If this is a store of a single register via a 'push', then we should use
6778    // a pre-indexed STR instruction instead, per the ARM ARM.
6779    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6780        Inst.getNumOperands() == 5) {
6781      MCInst TmpInst;
6782      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6783      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6784      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6785      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6786      TmpInst.addOperand(MCOperand::CreateImm(-4));
6787      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6788      TmpInst.addOperand(Inst.getOperand(3));
6789      Inst = TmpInst;
6790    }
6791    break;
6792  case ARM::t2ADDri12:
6793    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6794    // mnemonic was used (not "addw"), encoding T3 is preferred.
6795    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6796        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6797      break;
6798    Inst.setOpcode(ARM::t2ADDri);
6799    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6800    break;
6801  case ARM::t2SUBri12:
6802    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6803    // mnemonic was used (not "subw"), encoding T3 is preferred.
6804    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6805        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6806      break;
6807    Inst.setOpcode(ARM::t2SUBri);
6808    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6809    break;
6810  case ARM::tADDi8:
6811    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6812    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6813    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6814    // to encoding T1 if <Rd> is omitted."
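        // For example, "adds r2, r2, #5" (Rd written out) selects the 16-bit
        // tADDi3 encoding, while "adds r2, #5" stays as tADDi8.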
6815    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6816      Inst.setOpcode(ARM::tADDi3);
6817      return true;
6818    }
6819    break;
6820  case ARM::tSUBi8:
6821    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6822    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6823    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6824    // to encoding T1 if <Rd> is omitted."
6825    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6826      Inst.setOpcode(ARM::tSUBi3);
6827      return true;
6828    }
6829    break;
6830  case ARM::t2ADDrr: {
6831    // If the destination and first source operand are the same, and
6832    // there's no setting of the flags, use encoding T2 instead of T3.
6833    // Note that this is only for ADD, not SUB. This mirrors the system
6834    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
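        // For example, "add r2, r2, r8" can narrow to tADDhirr, while
        // "add.w r2, r2, r8" or "adds r2, r2, r8" keeps the 32-bit t2ADDrr.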
6835    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6836        Inst.getOperand(5).getReg() != 0 ||
6837        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6838         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6839      break;
6840    MCInst TmpInst;
6841    TmpInst.setOpcode(ARM::tADDhirr);
6842    TmpInst.addOperand(Inst.getOperand(0));
6843    TmpInst.addOperand(Inst.getOperand(0));
6844    TmpInst.addOperand(Inst.getOperand(2));
6845    TmpInst.addOperand(Inst.getOperand(3));
6846    TmpInst.addOperand(Inst.getOperand(4));
6847    Inst = TmpInst;
6848    return true;
6849  }
6850  case ARM::tB:
6851    // A Thumb conditional branch outside of an IT block is a tBcc.
6852    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6853      Inst.setOpcode(ARM::tBcc);
6854      return true;
6855    }
6856    break;
6857  case ARM::t2B:
6858    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6859    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6860      Inst.setOpcode(ARM::t2Bcc);
6861      return true;
6862    }
6863    break;
6864  case ARM::t2Bcc:
6865    // If the conditional is AL or we're in an IT block, we really want t2B.
6866    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6867      Inst.setOpcode(ARM::t2B);
6868      return true;
6869    }
6870    break;
6871  case ARM::tBcc:
6872    // If the conditional is AL, we really want tB.
6873    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6874      Inst.setOpcode(ARM::tB);
6875      return true;
6876    }
6877    break;
6878  case ARM::tLDMIA: {
6879    // If the register list contains any high registers, or if the writeback
6880    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6881    // instead if we're in Thumb2. Otherwise, this should have generated
6882    // an error in validateInstruction().
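        // For example, "ldmia r0!, {r1, r2}" and "ldmia r0, {r0, r1}" fit the
        // 16-bit encoding, but "ldmia r0, {r1, r2}" (no writeback, base not in
        // the list) or a list containing a high register needs t2LDMIA.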
6883    unsigned Rn = Inst.getOperand(0).getReg();
6884    bool hasWritebackToken =
6885      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6886       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6887    bool listContainsBase;
6888    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6889        (!listContainsBase && !hasWritebackToken) ||
6890        (listContainsBase && hasWritebackToken)) {
6891      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6892      assert (isThumbTwo());
6893      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6894      // If we're switching to the updating version, we need to insert
6895      // the writeback tied operand.
6896      if (hasWritebackToken)
6897        Inst.insert(Inst.begin(),
6898                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6899      return true;
6900    }
6901    break;
6902  }
6903  case ARM::tSTMIA_UPD: {
6904    // If the register list contains any high registers, we need to use
6905    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6906    // should have generated an error in validateInstruction().
6907    unsigned Rn = Inst.getOperand(0).getReg();
6908    bool listContainsBase;
6909    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
6910      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6911      assert (isThumbTwo());
6912      Inst.setOpcode(ARM::t2STMIA_UPD);
6913      return true;
6914    }
6915    break;
6916  }
6917  case ARM::tPOP: {
6918    bool listContainsBase;
6919    // If the register list contains any high registers, we need to use
6920    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6921    // should have generated an error in validateInstruction().
6922    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
6923      return false;
6924    assert (isThumbTwo());
6925    Inst.setOpcode(ARM::t2LDMIA_UPD);
6926    // Add the base register and writeback operands.
6927    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6928    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6929    return true;
6930  }
6931  case ARM::tPUSH: {
6932    bool listContainsBase;
6933    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
6934      return false;
6935    assert (isThumbTwo());
6936    Inst.setOpcode(ARM::t2STMDB_UPD);
6937    // Add the base register and writeback operands.
6938    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6939    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
6940    return true;
6941  }
6942  case ARM::t2MOVi: {
6943    // If we can use the 16-bit encoding and the user didn't explicitly
6944    // request the 32-bit variant, transform it here.
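        // For example, "movs r1, #4" outside an IT block, or "mov r1, #4"
        // inside one, can use the 16-bit tMOVi8; "mov.w r1, #4" stays wide.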
6945    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6946        Inst.getOperand(1).getImm() <= 255 &&
6947        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
6948         Inst.getOperand(4).getReg() == ARM::CPSR) ||
6949        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
6950        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6951         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6952      // The operands aren't in the same order for tMOVi8...
6953      MCInst TmpInst;
6954      TmpInst.setOpcode(ARM::tMOVi8);
6955      TmpInst.addOperand(Inst.getOperand(0));
6956      TmpInst.addOperand(Inst.getOperand(4));
6957      TmpInst.addOperand(Inst.getOperand(1));
6958      TmpInst.addOperand(Inst.getOperand(2));
6959      TmpInst.addOperand(Inst.getOperand(3));
6960      Inst = TmpInst;
6961      return true;
6962    }
6963    break;
6964  }
6965  case ARM::t2MOVr: {
6966    // If we can use the 16-bit encoding and the user didn't explicitly
6967    // request the 32-bit variant, transform it here.
6968    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6969        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6970        Inst.getOperand(2).getImm() == ARMCC::AL &&
6971        Inst.getOperand(4).getReg() == ARM::CPSR &&
6972        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6973         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6974      // The operands aren't the same for tMOV[S]r... (no cc_out)
6975      MCInst TmpInst;
6976      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
6977      TmpInst.addOperand(Inst.getOperand(0));
6978      TmpInst.addOperand(Inst.getOperand(1));
6979      TmpInst.addOperand(Inst.getOperand(2));
6980      TmpInst.addOperand(Inst.getOperand(3));
6981      Inst = TmpInst;
6982      return true;
6983    }
6984    break;
6985  }
6986  case ARM::t2SXTH:
6987  case ARM::t2SXTB:
6988  case ARM::t2UXTH:
6989  case ARM::t2UXTB: {
6990    // If we can use the 16-bit encoding and the user didn't explicitly
6991    // request the 32-bit variant, transform it here.
6992    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6993        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6994        Inst.getOperand(2).getImm() == 0 &&
6995        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
6996         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
6997      unsigned NewOpc;
6998      switch (Inst.getOpcode()) {
6999      default: llvm_unreachable("Illegal opcode!");
7000      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7001      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7002      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7003      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7004      }
7005      // The operands aren't the same for thumb1 (no rotate operand).
7006      MCInst TmpInst;
7007      TmpInst.setOpcode(NewOpc);
7008      TmpInst.addOperand(Inst.getOperand(0));
7009      TmpInst.addOperand(Inst.getOperand(1));
7010      TmpInst.addOperand(Inst.getOperand(3));
7011      TmpInst.addOperand(Inst.getOperand(4));
7012      Inst = TmpInst;
7013      return true;
7014    }
7015    break;
7016  }
7017  case ARM::MOVsi: {
7018    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7019    if (SOpc == ARM_AM::rrx) return false;
7020    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7021      // Shifting by zero is accepted as a vanilla 'MOVr'
7022      MCInst TmpInst;
7023      TmpInst.setOpcode(ARM::MOVr);
7024      TmpInst.addOperand(Inst.getOperand(0));
7025      TmpInst.addOperand(Inst.getOperand(1));
7026      TmpInst.addOperand(Inst.getOperand(3));
7027      TmpInst.addOperand(Inst.getOperand(4));
7028      TmpInst.addOperand(Inst.getOperand(5));
7029      Inst = TmpInst;
7030      return true;
7031    }
7032    return false;
7033  }
7034  case ARM::ANDrsi:
7035  case ARM::ORRrsi:
7036  case ARM::EORrsi:
7037  case ARM::BICrsi:
7038  case ARM::SUBrsi:
7039  case ARM::ADDrsi: {
7040    unsigned newOpc;
7041    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7042    if (SOpc == ARM_AM::rrx) return false;
7043    switch (Inst.getOpcode()) {
7044    default: llvm_unreachable("unexpected opcode!");
7045    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7046    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7047    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7048    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7049    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7050    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7051    }
7052    // If the shift is by zero, use the non-shifted instruction definition.
7053    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7054      MCInst TmpInst;
7055      TmpInst.setOpcode(newOpc);
7056      TmpInst.addOperand(Inst.getOperand(0));
7057      TmpInst.addOperand(Inst.getOperand(1));
7058      TmpInst.addOperand(Inst.getOperand(2));
7059      TmpInst.addOperand(Inst.getOperand(4));
7060      TmpInst.addOperand(Inst.getOperand(5));
7061      TmpInst.addOperand(Inst.getOperand(6));
7062      Inst = TmpInst;
7063      return true;
7064    }
7065    return false;
7066  }
7067  case ARM::ITasm:
7068  case ARM::t2IT: {
7069    // In the encoded mask, each bit after the first condition means 't'
7070    // when it equals the low bit of the condition code. Our internal mask
7071    // always uses 1 to mean 't', so XOR-toggle those bits if the low bit
7072    // of the condition code is zero. The encoding also expects the low
7073    // bit of the condition to be carried as bit 4 of the mask operand,
7074    // so mask that in if needed.
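        // For example, "ite eq" arrives with the internal mask 0b0100 (bit 3
        // clear for the 'e', terminator at bit 2); since EQ's low bit is zero,
        // the bits above the terminator are toggled, yielding 0b1100.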
7075    MCOperand &MO = Inst.getOperand(1);
7076    unsigned Mask = MO.getImm();
7077    unsigned OrigMask = Mask;
7078    unsigned TZ = CountTrailingZeros_32(Mask);
7079    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7080      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7081      for (unsigned i = 3; i != TZ; --i)
7082        Mask ^= 1 << i;
7083    } else
7084      Mask |= 0x10;
7085    MO.setImm(Mask);
7086
7087    // Set up the IT block state according to the IT instruction we just
7088    // matched.
7089    assert(!inITBlock() && "nested IT blocks?!");
7090    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7091    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7092    ITState.CurPosition = 0;
7093    ITState.FirstCond = true;
7094    break;
7095  }
7096  }
7097  return false;
7098}
7099
7100unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7101  // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
7102  // suffix depending on whether they're in an IT block.
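      // For example, the 16-bit encoding of "adds r0, r1, r2" is rejected
      // inside an IT block, and its non-flag-setting counterpart is rejected
      // outside of one.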
7103  unsigned Opc = Inst.getOpcode();
7104  const MCInstrDesc &MCID = getInstDesc(Opc);
7105  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7106    assert(MCID.hasOptionalDef() &&
7107           "optionally flag setting instruction missing optional def operand");
7108    assert(MCID.NumOperands == Inst.getNumOperands() &&
7109           "operand count mismatch!");
7110    // Find the optional-def operand (cc_out).
7111    unsigned OpNo;
7112    for (OpNo = 0;
7113         !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands;
7114         ++OpNo)
7115      ;
7116    // If we're parsing Thumb1, reject it completely.
7117    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7118      return Match_MnemonicFail;
7119    // If we're parsing Thumb2, which form is legal depends on whether we're
7120    // in an IT block.
7121    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7122        !inITBlock())
7123      return Match_RequiresITBlock;
7124    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7125        inITBlock())
7126      return Match_RequiresNotITBlock;
7127  }
7128  // Some Thumb1 encodings that support high registers only allow both
7129  // registers to be low (r0-r7) when assembling for Thumb2.
7130  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7131           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7132           isARMLowRegister(Inst.getOperand(2).getReg()))
7133    return Match_RequiresThumb2;
7134  // Others only require ARMv6 or later.
7135  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7136           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7137           isARMLowRegister(Inst.getOperand(1).getReg()))
7138    return Match_RequiresV6;
7139  return Match_Success;
7140}
7141
7142bool ARMAsmParser::
7143MatchAndEmitInstruction(SMLoc IDLoc,
7144                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7145                        MCStreamer &Out) {
7146  MCInst Inst;
7147  unsigned ErrorInfo;
7148  unsigned MatchResult;
7149  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7150  switch (MatchResult) {
7151  default: break;
7152  case Match_Success:
7153    // Context sensitive operand constraints aren't handled by the matcher,
7154    // so check them here.
7155    if (validateInstruction(Inst, Operands)) {
7156      // Still progress the IT block, otherwise one wrong condition causes
7157      // nasty cascading errors.
7158      forwardITPosition();
7159      return true;
7160    }
7161
7162    // Some instructions need post-processing to, for example, tweak which
7163    // encoding is selected. Loop on it while changes happen so the
7164    // individual transformations can chain off each other. E.g.,
7165    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7166    while (processInstruction(Inst, Operands))
7167      ;
7168
7169    // Only move forward at the very end so that everything in validate
7170    // and process gets a consistent answer about whether we're in an IT
7171    // block.
7172    forwardITPosition();
7173
7174    // ITasm is an ARM mode pseudo-instruction that just sets the ITblock and
7175    // doesn't actually encode.
7176    if (Inst.getOpcode() == ARM::ITasm)
7177      return false;
7178
7179    Inst.setLoc(IDLoc);
7180    Out.EmitInstruction(Inst);
7181    return false;
7182  case Match_MissingFeature:
7183    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7184    return true;
7185  case Match_InvalidOperand: {
7186    SMLoc ErrorLoc = IDLoc;
7187    if (ErrorInfo != ~0U) {
7188      if (ErrorInfo >= Operands.size())
7189        return Error(IDLoc, "too few operands for instruction");
7190
7191      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7192      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7193    }
7194
7195    return Error(ErrorLoc, "invalid operand for instruction");
7196  }
7197  case Match_MnemonicFail:
7198    return Error(IDLoc, "invalid instruction");
7199  case Match_ConversionFail:
7200    // The converter function will have already emitted a diagnostic.
7201    return true;
7202  case Match_RequiresNotITBlock:
7203    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7204  case Match_RequiresITBlock:
7205    return Error(IDLoc, "instruction only valid inside IT block");
7206  case Match_RequiresV6:
7207    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7208  case Match_RequiresThumb2:
7209    return Error(IDLoc, "instruction variant requires Thumb2");
7210  }
7211
7212  llvm_unreachable("Implement any new match types added!");
7213}
7214
7215/// ParseDirective parses the ARM-specific directives
7216bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7217  StringRef IDVal = DirectiveID.getIdentifier();
7218  if (IDVal == ".word")
7219    return parseDirectiveWord(4, DirectiveID.getLoc());
7220  else if (IDVal == ".thumb")
7221    return parseDirectiveThumb(DirectiveID.getLoc());
7222  else if (IDVal == ".arm")
7223    return parseDirectiveARM(DirectiveID.getLoc());
7224  else if (IDVal == ".thumb_func")
7225    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7226  else if (IDVal == ".code")
7227    return parseDirectiveCode(DirectiveID.getLoc());
7228  else if (IDVal == ".syntax")
7229    return parseDirectiveSyntax(DirectiveID.getLoc());
7230  else if (IDVal == ".unreq")
7231    return parseDirectiveUnreq(DirectiveID.getLoc());
7232  else if (IDVal == ".arch")
7233    return parseDirectiveArch(DirectiveID.getLoc());
7234  else if (IDVal == ".eabi_attribute")
7235    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7236  return true;
7237}
7238
7239/// parseDirectiveWord
7240///  ::= .word [ expression (, expression)* ]
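    ///  e.g. ".word 0x12345678, label+4"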
7241bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7242  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7243    for (;;) {
7244      const MCExpr *Value;
7245      if (getParser().ParseExpression(Value))
7246        return true;
7247
7248      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7249
7250      if (getLexer().is(AsmToken::EndOfStatement))
7251        break;
7252
7253      // FIXME: Improve diagnostic.
7254      if (getLexer().isNot(AsmToken::Comma))
7255        return Error(L, "unexpected token in directive");
7256      Parser.Lex();
7257    }
7258  }
7259
7260  Parser.Lex();
7261  return false;
7262}
7263
7264/// parseDirectiveThumb
7265///  ::= .thumb
7266bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7267  if (getLexer().isNot(AsmToken::EndOfStatement))
7268    return Error(L, "unexpected token in directive");
7269  Parser.Lex();
7270
7271  if (!isThumb())
7272    SwitchMode();
7273  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7274  return false;
7275}
7276
7277/// parseDirectiveARM
7278///  ::= .arm
7279bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7280  if (getLexer().isNot(AsmToken::EndOfStatement))
7281    return Error(L, "unexpected token in directive");
7282  Parser.Lex();
7283
7284  if (isThumb())
7285    SwitchMode();
7286  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7287  return false;
7288}
7289
7290/// parseDirectiveThumbFunc
7291///  ::= .thumb_func symbol_name
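    ///  e.g. ".thumb_func _foo" on Darwin, or a bare ".thumb_func" on ELF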
7292bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7293  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7294  bool isMachO = MAI.hasSubsectionsViaSymbols();
7295  StringRef Name;
7296  bool needFuncName = true;
7297
7298  // Darwin asm has an optional function name after the .thumb_func directive;
7299  // ELF doesn't.
7300  if (isMachO) {
7301    const AsmToken &Tok = Parser.getTok();
7302    if (Tok.isNot(AsmToken::EndOfStatement)) {
7303      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7304        return Error(L, "unexpected token in .thumb_func directive");
7305      Name = Tok.getIdentifier();
7306      Parser.Lex(); // Consume the identifier token.
7307      needFuncName = false;
7308    }
7309  }
7310
7311  if (getLexer().isNot(AsmToken::EndOfStatement))
7312    return Error(L, "unexpected token in directive");
7313
7314  // Eat the end of statement and any blank lines that follow.
7315  while (getLexer().is(AsmToken::EndOfStatement))
7316    Parser.Lex();
7317
7318  // FIXME: assuming function name will be the line following .thumb_func
7319  // We really should be checking the next symbol definition even if there's
7320  // stuff in between.
7321  if (needFuncName) {
7322    Name = Parser.getTok().getIdentifier();
7323  }
7324
7325  // Mark symbol as a thumb symbol.
7326  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7327  getParser().getStreamer().EmitThumbFunc(Func);
7328  return false;
7329}
7330
7331/// parseDirectiveSyntax
7332///  ::= .syntax unified | divided
7333bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7334  const AsmToken &Tok = Parser.getTok();
7335  if (Tok.isNot(AsmToken::Identifier))
7336    return Error(L, "unexpected token in .syntax directive");
7337  StringRef Mode = Tok.getString();
7338  if (Mode == "unified" || Mode == "UNIFIED")
7339    Parser.Lex();
7340  else if (Mode == "divided" || Mode == "DIVIDED")
7341    return Error(L, "'.syntax divided' arm asssembly not supported");
7342  else
7343    return Error(L, "unrecognized syntax mode in .syntax directive");
7344
7345  if (getLexer().isNot(AsmToken::EndOfStatement))
7346    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7347  Parser.Lex();
7348
7349  // TODO tell the MC streamer the mode
7350  // getParser().getStreamer().Emit???();
7351  return false;
7352}
7353
7354/// parseDirectiveCode
7355///  ::= .code 16 | 32
7356bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7357  const AsmToken &Tok = Parser.getTok();
7358  if (Tok.isNot(AsmToken::Integer))
7359    return Error(L, "unexpected token in .code directive");
7360  int64_t Val = Parser.getTok().getIntVal();
7361  if (Val == 16)
7362    Parser.Lex();
7363  else if (Val == 32)
7364    Parser.Lex();
7365  else
7366    return Error(L, "invalid operand to .code directive");
7367
7368  if (getLexer().isNot(AsmToken::EndOfStatement))
7369    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7370  Parser.Lex();
7371
7372  if (Val == 16) {
7373    if (!isThumb())
7374      SwitchMode();
7375    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7376  } else {
7377    if (isThumb())
7378      SwitchMode();
7379    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7380  }
7381
7382  return false;
7383}
7384
7385/// parseDirectiveReq
7386///  ::= name .req registername
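    ///  e.g. "fp .req r11"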
7387bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7388  Parser.Lex(); // Eat the '.req' token.
7389  unsigned Reg;
7390  SMLoc SRegLoc, ERegLoc;
7391  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7392    Parser.EatToEndOfStatement();
7393    return Error(SRegLoc, "register name expected");
7394  }
7395
7396  // Shouldn't be anything else.
7397  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7398    Parser.EatToEndOfStatement();
7399    return Error(Parser.getTok().getLoc(),
7400                 "unexpected input in .req directive.");
7401  }
7402
7403  Parser.Lex(); // Consume the EndOfStatement
7404
7405  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7406    return Error(SRegLoc, "redefinition of '" + Name +
7407                          "' does not match original.");
7408
7409  return false;
7410}
7411
7412/// parseDirectiveUnreq
7413///  ::= .unreq registername
7414bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7415  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7416    Parser.EatToEndOfStatement();
7417    return Error(L, "unexpected input in .unreq directive.");
7418  }
7419  RegisterReqs.erase(Parser.getTok().getIdentifier());
7420  Parser.Lex(); // Eat the identifier.
7421  return false;
7422}
7423
7424/// parseDirectiveArch
7425///  ::= .arch token
7426bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7427  return true;
7428}
7429
7430/// parseDirectiveEabiAttr
7431///  ::= .eabi_attribute int, int
7432bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7433  return true;
7434}
7435
7436extern "C" void LLVMInitializeARMAsmLexer();
7437
7438/// Force static initialization.
7439extern "C" void LLVMInitializeARMAsmParser() {
7440  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7441  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7442  LLVMInitializeARMAsmLexer();
7443}
7444
7445#define GET_REGISTER_MATCHER
7446#define GET_MATCHER_IMPLEMENTATION
7447#include "ARMGenAsmMatcher.inc"
7448