ARMAsmParser.cpp revision a23ecc2ba945c9685a76552276e5f6f41859b4ab
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47  const MCRegisterInfo *MRI;
48
49  // Map of register aliases registers via the .req directive.
50  StringMap<unsigned> RegisterReqs;
51
52  struct {
53    ARMCC::CondCodes Cond;    // Condition for IT block.
54    unsigned Mask:4;          // Condition mask for instructions.
55                              // Starting at first 1 (from lsb).
56                              //   '1'  condition as indicated in IT.
57                              //   '0'  inverse of condition (else).
58                              // Count of instructions in IT block is
59                              // 4 - trailingzeroes(mask)
60
61    bool FirstCond;           // Explicit flag for when we're parsing the
62                              // First instruction in the IT block. It's
63                              // implied in the mask, so needs special
64                              // handling.
65
66    unsigned CurPosition;     // Current position in parsing of IT
67                              // block. In range [0,3]. Initialized
68                              // according to count of instructions in block.
69                              // ~0U if no active IT block.
70  } ITState;
71  bool inITBlock() { return ITState.CurPosition != ~0U;}
72  void forwardITPosition() {
73    if (!inITBlock()) return;
74    // Move to the next instruction in the IT block, if there is one. If not,
75    // mark the block as done.
76    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
77    if (++ITState.CurPosition == 5 - TZ)
78      ITState.CurPosition = ~0U; // Done with the IT block after this.
79  }
80
81
82  MCAsmParser &getParser() const { return Parser; }
83  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
84
85  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
86  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
87
88  int tryParseRegister();
89  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
90  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
93  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
94  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
95  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
96                              unsigned &ShiftAmount);
97  bool parseDirectiveWord(unsigned Size, SMLoc L);
98  bool parseDirectiveThumb(SMLoc L);
99  bool parseDirectiveARM(SMLoc L);
100  bool parseDirectiveThumbFunc(SMLoc L);
101  bool parseDirectiveCode(SMLoc L);
102  bool parseDirectiveSyntax(SMLoc L);
103  bool parseDirectiveReq(StringRef Name, SMLoc L);
104  bool parseDirectiveUnreq(SMLoc L);
105  bool parseDirectiveArch(SMLoc L);
106  bool parseDirectiveEabiAttr(SMLoc L);
107
108  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
109                          bool &CarrySetting, unsigned &ProcessorIMod,
110                          StringRef &ITMask);
111  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
112                             bool &CanAcceptPredicationCode);
113
114  bool isThumb() const {
115    // FIXME: Can tablegen auto-generate this?
116    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
117  }
118  bool isThumbOne() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
120  }
121  bool isThumbTwo() const {
122    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
123  }
124  bool hasV6Ops() const {
125    return STI.getFeatureBits() & ARM::HasV6Ops;
126  }
127  bool hasV7Ops() const {
128    return STI.getFeatureBits() & ARM::HasV7Ops;
129  }
130  void SwitchMode() {
131    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
132    setAvailableFeatures(FB);
133  }
134  bool isMClass() const {
135    return STI.getFeatureBits() & ARM::FeatureMClass;
136  }
137
138  /// @name Auto-generated Match Functions
139  /// {
140
141#define GET_ASSEMBLER_HEADER
142#include "ARMGenAsmMatcher.inc"
143
144  /// }
145
146  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
147  OperandMatchResultTy parseCoprocNumOperand(
148    SmallVectorImpl<MCParsedAsmOperand*>&);
149  OperandMatchResultTy parseCoprocRegOperand(
150    SmallVectorImpl<MCParsedAsmOperand*>&);
151  OperandMatchResultTy parseCoprocOptionOperand(
152    SmallVectorImpl<MCParsedAsmOperand*>&);
153  OperandMatchResultTy parseMemBarrierOptOperand(
154    SmallVectorImpl<MCParsedAsmOperand*>&);
155  OperandMatchResultTy parseProcIFlagsOperand(
156    SmallVectorImpl<MCParsedAsmOperand*>&);
157  OperandMatchResultTy parseMSRMaskOperand(
158    SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
160                                   StringRef Op, int Low, int High);
161  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "lsl", 0, 31);
163  }
164  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
165    return parsePKHImm(O, "asr", 1, 32);
166  }
167  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
173  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
174  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
175  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
176
177  // Asm Match Converter Methods
178  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
179                    const SmallVectorImpl<MCParsedAsmOperand*> &);
180  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
181                    const SmallVectorImpl<MCParsedAsmOperand*> &);
182  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
183                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
184  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
185                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
187                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
189                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
191                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
193                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
195                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
197                             const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
199                             const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
201                             const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
203                             const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
205                  const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
207                  const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
209                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
211                        const SmallVectorImpl<MCParsedAsmOperand*> &);
212  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
213                     const SmallVectorImpl<MCParsedAsmOperand*> &);
214  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
215                        const SmallVectorImpl<MCParsedAsmOperand*> &);
216  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
217                     const SmallVectorImpl<MCParsedAsmOperand*> &);
218  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
219                        const SmallVectorImpl<MCParsedAsmOperand*> &);
220
221  bool validateInstruction(MCInst &Inst,
222                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
223  bool processInstruction(MCInst &Inst,
224                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
225  bool shouldOmitCCOutOperand(StringRef Mnemonic,
226                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
227
228public:
229  enum ARMMatchResultTy {
230    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
231    Match_RequiresNotITBlock,
232    Match_RequiresV6,
233    Match_RequiresThumb2
234  };
235
236  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
237    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
238    MCAsmParserExtension::Initialize(_Parser);
239
240    // Cache the MCRegisterInfo.
241    MRI = &getContext().getRegisterInfo();
242
243    // Initialize the set of available features.
244    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
245
246    // Not in an ITBlock to start with.
247    ITState.CurPosition = ~0U;
248  }
249
250  // Implementation of the MCTargetAsmParser interface:
251  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
252  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
253                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
254  bool ParseDirective(AsmToken DirectiveID);
255
256  unsigned checkTargetMatchPredicate(MCInst &Inst);
257
258  bool MatchAndEmitInstruction(SMLoc IDLoc,
259                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
260                               MCStreamer &Out);
261};
262} // end anonymous namespace
263
264namespace {
265
266/// ARMOperand - Instances of this class represent a parsed ARM machine
267/// instruction.
268class ARMOperand : public MCParsedAsmOperand {
269  enum KindTy {
270    k_CondCode,
271    k_CCOut,
272    k_ITCondMask,
273    k_CoprocNum,
274    k_CoprocReg,
275    k_CoprocOption,
276    k_Immediate,
277    k_MemBarrierOpt,
278    k_Memory,
279    k_PostIndexRegister,
280    k_MSRMask,
281    k_ProcIFlags,
282    k_VectorIndex,
283    k_Register,
284    k_RegisterList,
285    k_DPRRegisterList,
286    k_SPRRegisterList,
287    k_VectorList,
288    k_VectorListAllLanes,
289    k_VectorListIndexed,
290    k_ShiftedRegister,
291    k_ShiftedImmediate,
292    k_ShifterImmediate,
293    k_RotateImmediate,
294    k_BitfieldDescriptor,
295    k_Token
296  } Kind;
297
298  SMLoc StartLoc, EndLoc;
299  SmallVector<unsigned, 8> Registers;
300
301  union {
302    struct {
303      ARMCC::CondCodes Val;
304    } CC;
305
306    struct {
307      unsigned Val;
308    } Cop;
309
310    struct {
311      unsigned Val;
312    } CoprocOption;
313
314    struct {
315      unsigned Mask:4;
316    } ITMask;
317
318    struct {
319      ARM_MB::MemBOpt Val;
320    } MBOpt;
321
322    struct {
323      ARM_PROC::IFlags Val;
324    } IFlags;
325
326    struct {
327      unsigned Val;
328    } MMask;
329
330    struct {
331      const char *Data;
332      unsigned Length;
333    } Tok;
334
335    struct {
336      unsigned RegNum;
337    } Reg;
338
339    // A vector register list is a sequential list of 1 to 4 registers.
340    struct {
341      unsigned RegNum;
342      unsigned Count;
343      unsigned LaneIndex;
344      bool isDoubleSpaced;
345    } VectorList;
346
347    struct {
348      unsigned Val;
349    } VectorIndex;
350
351    struct {
352      const MCExpr *Val;
353    } Imm;
354
355    /// Combined record for all forms of ARM address expressions.
356    struct {
357      unsigned BaseRegNum;
358      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
359      // was specified.
360      const MCConstantExpr *OffsetImm;  // Offset immediate value
361      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
362      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
363      unsigned ShiftImm;        // shift for OffsetReg.
364      unsigned Alignment;       // 0 = no alignment specified
365                                // n = alignment in bytes (2, 4, 8, 16, or 32)
366      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
367    } Memory;
368
369    struct {
370      unsigned RegNum;
371      bool isAdd;
372      ARM_AM::ShiftOpc ShiftTy;
373      unsigned ShiftImm;
374    } PostIdxReg;
375
376    struct {
377      bool isASR;
378      unsigned Imm;
379    } ShifterImm;
380    struct {
381      ARM_AM::ShiftOpc ShiftTy;
382      unsigned SrcReg;
383      unsigned ShiftReg;
384      unsigned ShiftImm;
385    } RegShiftedReg;
386    struct {
387      ARM_AM::ShiftOpc ShiftTy;
388      unsigned SrcReg;
389      unsigned ShiftImm;
390    } RegShiftedImm;
391    struct {
392      unsigned Imm;
393    } RotImm;
394    struct {
395      unsigned LSB;
396      unsigned Width;
397    } Bitfield;
398  };
399
400  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
401public:
402  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
403    Kind = o.Kind;
404    StartLoc = o.StartLoc;
405    EndLoc = o.EndLoc;
406    switch (Kind) {
407    case k_CondCode:
408      CC = o.CC;
409      break;
410    case k_ITCondMask:
411      ITMask = o.ITMask;
412      break;
413    case k_Token:
414      Tok = o.Tok;
415      break;
416    case k_CCOut:
417    case k_Register:
418      Reg = o.Reg;
419      break;
420    case k_RegisterList:
421    case k_DPRRegisterList:
422    case k_SPRRegisterList:
423      Registers = o.Registers;
424      break;
425    case k_VectorList:
426    case k_VectorListAllLanes:
427    case k_VectorListIndexed:
428      VectorList = o.VectorList;
429      break;
430    case k_CoprocNum:
431    case k_CoprocReg:
432      Cop = o.Cop;
433      break;
434    case k_CoprocOption:
435      CoprocOption = o.CoprocOption;
436      break;
437    case k_Immediate:
438      Imm = o.Imm;
439      break;
440    case k_MemBarrierOpt:
441      MBOpt = o.MBOpt;
442      break;
443    case k_Memory:
444      Memory = o.Memory;
445      break;
446    case k_PostIndexRegister:
447      PostIdxReg = o.PostIdxReg;
448      break;
449    case k_MSRMask:
450      MMask = o.MMask;
451      break;
452    case k_ProcIFlags:
453      IFlags = o.IFlags;
454      break;
455    case k_ShifterImmediate:
456      ShifterImm = o.ShifterImm;
457      break;
458    case k_ShiftedRegister:
459      RegShiftedReg = o.RegShiftedReg;
460      break;
461    case k_ShiftedImmediate:
462      RegShiftedImm = o.RegShiftedImm;
463      break;
464    case k_RotateImmediate:
465      RotImm = o.RotImm;
466      break;
467    case k_BitfieldDescriptor:
468      Bitfield = o.Bitfield;
469      break;
470    case k_VectorIndex:
471      VectorIndex = o.VectorIndex;
472      break;
473    }
474  }
475
476  /// getStartLoc - Get the location of the first token of this operand.
477  SMLoc getStartLoc() const { return StartLoc; }
478  /// getEndLoc - Get the location of the last token of this operand.
479  SMLoc getEndLoc() const { return EndLoc; }
480
481  ARMCC::CondCodes getCondCode() const {
482    assert(Kind == k_CondCode && "Invalid access!");
483    return CC.Val;
484  }
485
486  unsigned getCoproc() const {
487    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
488    return Cop.Val;
489  }
490
491  StringRef getToken() const {
492    assert(Kind == k_Token && "Invalid access!");
493    return StringRef(Tok.Data, Tok.Length);
494  }
495
496  unsigned getReg() const {
497    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
498    return Reg.RegNum;
499  }
500
501  const SmallVectorImpl<unsigned> &getRegList() const {
502    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
503            Kind == k_SPRRegisterList) && "Invalid access!");
504    return Registers;
505  }
506
507  const MCExpr *getImm() const {
508    assert(isImm() && "Invalid access!");
509    return Imm.Val;
510  }
511
512  unsigned getVectorIndex() const {
513    assert(Kind == k_VectorIndex && "Invalid access!");
514    return VectorIndex.Val;
515  }
516
517  ARM_MB::MemBOpt getMemBarrierOpt() const {
518    assert(Kind == k_MemBarrierOpt && "Invalid access!");
519    return MBOpt.Val;
520  }
521
522  ARM_PROC::IFlags getProcIFlags() const {
523    assert(Kind == k_ProcIFlags && "Invalid access!");
524    return IFlags.Val;
525  }
526
527  unsigned getMSRMask() const {
528    assert(Kind == k_MSRMask && "Invalid access!");
529    return MMask.Val;
530  }
531
532  bool isCoprocNum() const { return Kind == k_CoprocNum; }
533  bool isCoprocReg() const { return Kind == k_CoprocReg; }
534  bool isCoprocOption() const { return Kind == k_CoprocOption; }
535  bool isCondCode() const { return Kind == k_CondCode; }
536  bool isCCOut() const { return Kind == k_CCOut; }
537  bool isITMask() const { return Kind == k_ITCondMask; }
538  bool isITCondCode() const { return Kind == k_CondCode; }
539  bool isImm() const { return Kind == k_Immediate; }
540  bool isFPImm() const {
541    if (!isImm()) return false;
542    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
543    if (!CE) return false;
544    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
545    return Val != -1;
546  }
547  bool isFBits16() const {
548    if (!isImm()) return false;
549    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
550    if (!CE) return false;
551    int64_t Value = CE->getValue();
552    return Value >= 0 && Value <= 16;
553  }
554  bool isFBits32() const {
555    if (!isImm()) return false;
556    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
557    if (!CE) return false;
558    int64_t Value = CE->getValue();
559    return Value >= 1 && Value <= 32;
560  }
561  bool isImm8s4() const {
562    if (!isImm()) return false;
563    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
564    if (!CE) return false;
565    int64_t Value = CE->getValue();
566    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
567  }
568  bool isImm0_1020s4() const {
569    if (!isImm()) return false;
570    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
571    if (!CE) return false;
572    int64_t Value = CE->getValue();
573    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
574  }
575  bool isImm0_508s4() const {
576    if (!isImm()) return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
581  }
582  bool isImm0_508s4Neg() const {
583    if (!isImm()) return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = -CE->getValue();
587    // explicitly exclude zero. we want that to use the normal 0_508 version.
588    return ((Value & 3) == 0) && Value > 0 && Value <= 508;
589  }
590  bool isImm0_255() const {
591    if (!isImm()) return false;
592    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
593    if (!CE) return false;
594    int64_t Value = CE->getValue();
595    return Value >= 0 && Value < 256;
596  }
597  bool isImm0_4095() const {
598    if (!isImm()) return false;
599    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
600    if (!CE) return false;
601    int64_t Value = CE->getValue();
602    return Value >= 0 && Value < 4096;
603  }
604  bool isImm0_4095Neg() const {
605    if (!isImm()) return false;
606    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
607    if (!CE) return false;
608    int64_t Value = -CE->getValue();
609    return Value > 0 && Value < 4096;
610  }
611  bool isImm0_1() const {
612    if (!isImm()) return false;
613    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
614    if (!CE) return false;
615    int64_t Value = CE->getValue();
616    return Value >= 0 && Value < 2;
617  }
618  bool isImm0_3() const {
619    if (!isImm()) return false;
620    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
621    if (!CE) return false;
622    int64_t Value = CE->getValue();
623    return Value >= 0 && Value < 4;
624  }
625  bool isImm0_7() const {
626    if (!isImm()) return false;
627    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
628    if (!CE) return false;
629    int64_t Value = CE->getValue();
630    return Value >= 0 && Value < 8;
631  }
632  bool isImm0_15() const {
633    if (!isImm()) return false;
634    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
635    if (!CE) return false;
636    int64_t Value = CE->getValue();
637    return Value >= 0 && Value < 16;
638  }
639  bool isImm0_31() const {
640    if (!isImm()) return false;
641    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
642    if (!CE) return false;
643    int64_t Value = CE->getValue();
644    return Value >= 0 && Value < 32;
645  }
646  bool isImm0_63() const {
647    if (!isImm()) return false;
648    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
649    if (!CE) return false;
650    int64_t Value = CE->getValue();
651    return Value >= 0 && Value < 64;
652  }
653  bool isImm8() const {
654    if (!isImm()) return false;
655    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
656    if (!CE) return false;
657    int64_t Value = CE->getValue();
658    return Value == 8;
659  }
660  bool isImm16() const {
661    if (!isImm()) return false;
662    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
663    if (!CE) return false;
664    int64_t Value = CE->getValue();
665    return Value == 16;
666  }
667  bool isImm32() const {
668    if (!isImm()) return false;
669    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
670    if (!CE) return false;
671    int64_t Value = CE->getValue();
672    return Value == 32;
673  }
674  bool isShrImm8() const {
675    if (!isImm()) return false;
676    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
677    if (!CE) return false;
678    int64_t Value = CE->getValue();
679    return Value > 0 && Value <= 8;
680  }
681  bool isShrImm16() const {
682    if (!isImm()) return false;
683    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
684    if (!CE) return false;
685    int64_t Value = CE->getValue();
686    return Value > 0 && Value <= 16;
687  }
688  bool isShrImm32() const {
689    if (!isImm()) return false;
690    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
691    if (!CE) return false;
692    int64_t Value = CE->getValue();
693    return Value > 0 && Value <= 32;
694  }
695  bool isShrImm64() const {
696    if (!isImm()) return false;
697    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
698    if (!CE) return false;
699    int64_t Value = CE->getValue();
700    return Value > 0 && Value <= 64;
701  }
702  bool isImm1_7() const {
703    if (!isImm()) return false;
704    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
705    if (!CE) return false;
706    int64_t Value = CE->getValue();
707    return Value > 0 && Value < 8;
708  }
709  bool isImm1_15() const {
710    if (!isImm()) return false;
711    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
712    if (!CE) return false;
713    int64_t Value = CE->getValue();
714    return Value > 0 && Value < 16;
715  }
716  bool isImm1_31() const {
717    if (!isImm()) return false;
718    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
719    if (!CE) return false;
720    int64_t Value = CE->getValue();
721    return Value > 0 && Value < 32;
722  }
723  bool isImm1_16() const {
724    if (!isImm()) return false;
725    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
726    if (!CE) return false;
727    int64_t Value = CE->getValue();
728    return Value > 0 && Value < 17;
729  }
730  bool isImm1_32() const {
731    if (!isImm()) return false;
732    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
733    if (!CE) return false;
734    int64_t Value = CE->getValue();
735    return Value > 0 && Value < 33;
736  }
737  bool isImm0_32() const {
738    if (!isImm()) return false;
739    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
740    if (!CE) return false;
741    int64_t Value = CE->getValue();
742    return Value >= 0 && Value < 33;
743  }
744  bool isImm0_65535() const {
745    if (!isImm()) return false;
746    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
747    if (!CE) return false;
748    int64_t Value = CE->getValue();
749    return Value >= 0 && Value < 65536;
750  }
751  bool isImm0_65535Expr() const {
752    if (!isImm()) return false;
753    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
754    // If it's not a constant expression, it'll generate a fixup and be
755    // handled later.
756    if (!CE) return true;
757    int64_t Value = CE->getValue();
758    return Value >= 0 && Value < 65536;
759  }
760  bool isImm24bit() const {
761    if (!isImm()) return false;
762    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
763    if (!CE) return false;
764    int64_t Value = CE->getValue();
765    return Value >= 0 && Value <= 0xffffff;
766  }
767  bool isImmThumbSR() const {
768    if (!isImm()) return false;
769    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
770    if (!CE) return false;
771    int64_t Value = CE->getValue();
772    return Value > 0 && Value < 33;
773  }
774  bool isPKHLSLImm() const {
775    if (!isImm()) return false;
776    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
777    if (!CE) return false;
778    int64_t Value = CE->getValue();
779    return Value >= 0 && Value < 32;
780  }
781  bool isPKHASRImm() const {
782    if (!isImm()) return false;
783    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
784    if (!CE) return false;
785    int64_t Value = CE->getValue();
786    return Value > 0 && Value <= 32;
787  }
788  bool isARMSOImm() const {
789    if (!isImm()) return false;
790    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
791    if (!CE) return false;
792    int64_t Value = CE->getValue();
793    return ARM_AM::getSOImmVal(Value) != -1;
794  }
795  bool isARMSOImmNot() const {
796    if (!isImm()) return false;
797    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
798    if (!CE) return false;
799    int64_t Value = CE->getValue();
800    return ARM_AM::getSOImmVal(~Value) != -1;
801  }
802  bool isARMSOImmNeg() const {
803    if (!isImm()) return false;
804    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
805    if (!CE) return false;
806    int64_t Value = CE->getValue();
807    // Only use this when not representable as a plain so_imm.
808    return ARM_AM::getSOImmVal(Value) == -1 &&
809      ARM_AM::getSOImmVal(-Value) != -1;
810  }
811  bool isT2SOImm() const {
812    if (!isImm()) return false;
813    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
814    if (!CE) return false;
815    int64_t Value = CE->getValue();
816    return ARM_AM::getT2SOImmVal(Value) != -1;
817  }
818  bool isT2SOImmNot() const {
819    if (!isImm()) return false;
820    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
821    if (!CE) return false;
822    int64_t Value = CE->getValue();
823    return ARM_AM::getT2SOImmVal(~Value) != -1;
824  }
825  bool isT2SOImmNeg() const {
826    if (!isImm()) return false;
827    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
828    if (!CE) return false;
829    int64_t Value = CE->getValue();
830    // Only use this when not representable as a plain so_imm.
831    return ARM_AM::getT2SOImmVal(Value) == -1 &&
832      ARM_AM::getT2SOImmVal(-Value) != -1;
833  }
834  bool isSetEndImm() const {
835    if (!isImm()) return false;
836    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
837    if (!CE) return false;
838    int64_t Value = CE->getValue();
839    return Value == 1 || Value == 0;
840  }
841  bool isReg() const { return Kind == k_Register; }
842  bool isRegList() const { return Kind == k_RegisterList; }
843  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
844  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
845  bool isToken() const { return Kind == k_Token; }
846  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
847  bool isMemory() const { return Kind == k_Memory; }
848  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
849  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
850  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
851  bool isRotImm() const { return Kind == k_RotateImmediate; }
852  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
853  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
854  bool isPostIdxReg() const {
855    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
856  }
857  bool isMemNoOffset(bool alignOK = false) const {
858    if (!isMemory())
859      return false;
860    // No offset of any kind.
861    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
862     (alignOK || Memory.Alignment == 0);
863  }
864  bool isMemPCRelImm12() const {
865    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
866      return false;
867    // Base register must be PC.
868    if (Memory.BaseRegNum != ARM::PC)
869      return false;
870    // Immediate offset in range [-4095, 4095].
871    if (!Memory.OffsetImm) return true;
872    int64_t Val = Memory.OffsetImm->getValue();
873    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
874  }
875  bool isAlignedMemory() const {
876    return isMemNoOffset(true);
877  }
878  bool isAddrMode2() const {
879    if (!isMemory() || Memory.Alignment != 0) return false;
880    // Check for register offset.
881    if (Memory.OffsetRegNum) return true;
882    // Immediate offset in range [-4095, 4095].
883    if (!Memory.OffsetImm) return true;
884    int64_t Val = Memory.OffsetImm->getValue();
885    return Val > -4096 && Val < 4096;
886  }
887  bool isAM2OffsetImm() const {
888    if (!isImm()) return false;
889    // Immediate offset in range [-4095, 4095].
890    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
891    if (!CE) return false;
892    int64_t Val = CE->getValue();
893    return Val > -4096 && Val < 4096;
894  }
895  bool isAddrMode3() const {
896    // If we have an immediate that's not a constant, treat it as a label
897    // reference needing a fixup. If it is a constant, it's something else
898    // and we reject it.
899    if (isImm() && !isa<MCConstantExpr>(getImm()))
900      return true;
901    if (!isMemory() || Memory.Alignment != 0) return false;
902    // No shifts are legal for AM3.
903    if (Memory.ShiftType != ARM_AM::no_shift) return false;
904    // Check for register offset.
905    if (Memory.OffsetRegNum) return true;
906    // Immediate offset in range [-255, 255].
907    if (!Memory.OffsetImm) return true;
908    int64_t Val = Memory.OffsetImm->getValue();
909    return Val > -256 && Val < 256;
910  }
911  bool isAM3Offset() const {
912    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
913      return false;
914    if (Kind == k_PostIndexRegister)
915      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
916    // Immediate offset in range [-255, 255].
917    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
918    if (!CE) return false;
919    int64_t Val = CE->getValue();
920    // Special case, #-0 is INT32_MIN.
921    return (Val > -256 && Val < 256) || Val == INT32_MIN;
922  }
923  bool isAddrMode5() const {
924    // If we have an immediate that's not a constant, treat it as a label
925    // reference needing a fixup. If it is a constant, it's something else
926    // and we reject it.
927    if (isImm() && !isa<MCConstantExpr>(getImm()))
928      return true;
929    if (!isMemory() || Memory.Alignment != 0) return false;
930    // Check for register offset.
931    if (Memory.OffsetRegNum) return false;
932    // Immediate offset in range [-1020, 1020] and a multiple of 4.
933    if (!Memory.OffsetImm) return true;
934    int64_t Val = Memory.OffsetImm->getValue();
935    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
936      Val == INT32_MIN;
937  }
938  bool isMemTBB() const {
939    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
940        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
941      return false;
942    return true;
943  }
944  bool isMemTBH() const {
945    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
946        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
947        Memory.Alignment != 0 )
948      return false;
949    return true;
950  }
951  bool isMemRegOffset() const {
952    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
953      return false;
954    return true;
955  }
956  bool isT2MemRegOffset() const {
957    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
958        Memory.Alignment != 0)
959      return false;
960    // Only lsl #{0, 1, 2, 3} allowed.
961    if (Memory.ShiftType == ARM_AM::no_shift)
962      return true;
963    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
964      return false;
965    return true;
966  }
967  bool isMemThumbRR() const {
968    // Thumb reg+reg addressing is simple. Just two registers, a base and
969    // an offset. No shifts, negations or any other complicating factors.
970    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
971        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
972      return false;
973    return isARMLowRegister(Memory.BaseRegNum) &&
974      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
975  }
976  bool isMemThumbRIs4() const {
977    if (!isMemory() || Memory.OffsetRegNum != 0 ||
978        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
979      return false;
980    // Immediate offset, multiple of 4 in range [0, 124].
981    if (!Memory.OffsetImm) return true;
982    int64_t Val = Memory.OffsetImm->getValue();
983    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
984  }
985  bool isMemThumbRIs2() const {
986    if (!isMemory() || Memory.OffsetRegNum != 0 ||
987        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
988      return false;
989    // Immediate offset, multiple of 4 in range [0, 62].
990    if (!Memory.OffsetImm) return true;
991    int64_t Val = Memory.OffsetImm->getValue();
992    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
993  }
994  bool isMemThumbRIs1() const {
995    if (!isMemory() || Memory.OffsetRegNum != 0 ||
996        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
997      return false;
998    // Immediate offset in range [0, 31].
999    if (!Memory.OffsetImm) return true;
1000    int64_t Val = Memory.OffsetImm->getValue();
1001    return Val >= 0 && Val <= 31;
1002  }
1003  bool isMemThumbSPI() const {
1004    if (!isMemory() || Memory.OffsetRegNum != 0 ||
1005        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
1006      return false;
1007    // Immediate offset, multiple of 4 in range [0, 1020].
1008    if (!Memory.OffsetImm) return true;
1009    int64_t Val = Memory.OffsetImm->getValue();
1010    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
1011  }
1012  bool isMemImm8s4Offset() const {
1013    // If we have an immediate that's not a constant, treat it as a label
1014    // reference needing a fixup. If it is a constant, it's something else
1015    // and we reject it.
1016    if (isImm() && !isa<MCConstantExpr>(getImm()))
1017      return true;
1018    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1019      return false;
1020    // Immediate offset a multiple of 4 in range [-1020, 1020].
1021    if (!Memory.OffsetImm) return true;
1022    int64_t Val = Memory.OffsetImm->getValue();
1023    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1024  }
1025  bool isMemImm0_1020s4Offset() const {
1026    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1027      return false;
1028    // Immediate offset a multiple of 4 in range [0, 1020].
1029    if (!Memory.OffsetImm) return true;
1030    int64_t Val = Memory.OffsetImm->getValue();
1031    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1032  }
1033  bool isMemImm8Offset() const {
1034    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1035      return false;
1036    // Base reg of PC isn't allowed for these encodings.
1037    if (Memory.BaseRegNum == ARM::PC) return false;
1038    // Immediate offset in range [-255, 255].
1039    if (!Memory.OffsetImm) return true;
1040    int64_t Val = Memory.OffsetImm->getValue();
1041    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1042  }
1043  bool isMemPosImm8Offset() const {
1044    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1045      return false;
1046    // Immediate offset in range [0, 255].
1047    if (!Memory.OffsetImm) return true;
1048    int64_t Val = Memory.OffsetImm->getValue();
1049    return Val >= 0 && Val < 256;
1050  }
1051  bool isMemNegImm8Offset() const {
1052    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1053      return false;
1054    // Base reg of PC isn't allowed for these encodings.
1055    if (Memory.BaseRegNum == ARM::PC) return false;
1056    // Immediate offset in range [-255, -1].
1057    if (!Memory.OffsetImm) return false;
1058    int64_t Val = Memory.OffsetImm->getValue();
1059    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1060  }
1061  bool isMemUImm12Offset() const {
1062    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1063      return false;
1064    // Immediate offset in range [0, 4095].
1065    if (!Memory.OffsetImm) return true;
1066    int64_t Val = Memory.OffsetImm->getValue();
1067    return (Val >= 0 && Val < 4096);
1068  }
1069  bool isMemImm12Offset() const {
1070    // If we have an immediate that's not a constant, treat it as a label
1071    // reference needing a fixup. If it is a constant, it's something else
1072    // and we reject it.
1073    if (isImm() && !isa<MCConstantExpr>(getImm()))
1074      return true;
1075
1076    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1077      return false;
1078    // Immediate offset in range [-4095, 4095].
1079    if (!Memory.OffsetImm) return true;
1080    int64_t Val = Memory.OffsetImm->getValue();
1081    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1082  }
1083  bool isPostIdxImm8() const {
1084    if (!isImm()) return false;
1085    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1086    if (!CE) return false;
1087    int64_t Val = CE->getValue();
1088    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1089  }
1090  bool isPostIdxImm8s4() const {
1091    if (!isImm()) return false;
1092    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1093    if (!CE) return false;
1094    int64_t Val = CE->getValue();
1095    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1096      (Val == INT32_MIN);
1097  }
1098
1099  bool isMSRMask() const { return Kind == k_MSRMask; }
1100  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1101
1102  // NEON operands.
1103  bool isSingleSpacedVectorList() const {
1104    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1105  }
1106  bool isDoubleSpacedVectorList() const {
1107    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1108  }
1109  bool isVecListOneD() const {
1110    if (!isSingleSpacedVectorList()) return false;
1111    return VectorList.Count == 1;
1112  }
1113
1114  bool isVecListDPair() const {
1115    if (!isSingleSpacedVectorList()) return false;
1116    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1117              .contains(VectorList.RegNum));
1118  }
1119
1120  bool isVecListThreeD() const {
1121    if (!isSingleSpacedVectorList()) return false;
1122    return VectorList.Count == 3;
1123  }
1124
1125  bool isVecListFourD() const {
1126    if (!isSingleSpacedVectorList()) return false;
1127    return VectorList.Count == 4;
1128  }
1129
1130  bool isVecListDPairSpaced() const {
1131    if (isSingleSpacedVectorList()) return false;
1132    return (ARMMCRegisterClasses[ARM::DPairSpcRegClassID]
1133              .contains(VectorList.RegNum));
1134  }
1135
1136  bool isVecListThreeQ() const {
1137    if (!isDoubleSpacedVectorList()) return false;
1138    return VectorList.Count == 3;
1139  }
1140
1141  bool isVecListFourQ() const {
1142    if (!isDoubleSpacedVectorList()) return false;
1143    return VectorList.Count == 4;
1144  }
1145
1146  bool isSingleSpacedVectorAllLanes() const {
1147    return Kind == k_VectorListAllLanes && !VectorList.isDoubleSpaced;
1148  }
1149  bool isDoubleSpacedVectorAllLanes() const {
1150    return Kind == k_VectorListAllLanes && VectorList.isDoubleSpaced;
1151  }
1152  bool isVecListOneDAllLanes() const {
1153    if (!isSingleSpacedVectorAllLanes()) return false;
1154    return VectorList.Count == 1;
1155  }
1156
1157  bool isVecListDPairAllLanes() const {
1158    if (!isSingleSpacedVectorAllLanes()) return false;
1159    return (ARMMCRegisterClasses[ARM::DPairRegClassID]
1160              .contains(VectorList.RegNum));
1161  }
1162
1163  bool isVecListDPairSpacedAllLanes() const {
1164    if (!isDoubleSpacedVectorAllLanes()) return false;
1165    return VectorList.Count == 2;
1166  }
1167
1168  bool isVecListThreeDAllLanes() const {
1169    if (!isSingleSpacedVectorAllLanes()) return false;
1170    return VectorList.Count == 3;
1171  }
1172
1173  bool isVecListThreeQAllLanes() const {
1174    if (!isDoubleSpacedVectorAllLanes()) return false;
1175    return VectorList.Count == 3;
1176  }
1177
1178  bool isVecListFourDAllLanes() const {
1179    if (!isSingleSpacedVectorAllLanes()) return false;
1180    return VectorList.Count == 4;
1181  }
1182
1183  bool isVecListFourQAllLanes() const {
1184    if (!isDoubleSpacedVectorAllLanes()) return false;
1185    return VectorList.Count == 4;
1186  }
1187
1188  bool isSingleSpacedVectorIndexed() const {
1189    return Kind == k_VectorListIndexed && !VectorList.isDoubleSpaced;
1190  }
1191  bool isDoubleSpacedVectorIndexed() const {
1192    return Kind == k_VectorListIndexed && VectorList.isDoubleSpaced;
1193  }
1194  bool isVecListOneDByteIndexed() const {
1195    if (!isSingleSpacedVectorIndexed()) return false;
1196    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1197  }
1198
1199  bool isVecListOneDHWordIndexed() const {
1200    if (!isSingleSpacedVectorIndexed()) return false;
1201    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1202  }
1203
1204  bool isVecListOneDWordIndexed() const {
1205    if (!isSingleSpacedVectorIndexed()) return false;
1206    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1207  }
1208
1209  bool isVecListTwoDByteIndexed() const {
1210    if (!isSingleSpacedVectorIndexed()) return false;
1211    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1212  }
1213
1214  bool isVecListTwoDHWordIndexed() const {
1215    if (!isSingleSpacedVectorIndexed()) return false;
1216    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1217  }
1218
1219  bool isVecListTwoQWordIndexed() const {
1220    if (!isDoubleSpacedVectorIndexed()) return false;
1221    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1222  }
1223
1224  bool isVecListTwoQHWordIndexed() const {
1225    if (!isDoubleSpacedVectorIndexed()) return false;
1226    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1227  }
1228
1229  bool isVecListTwoDWordIndexed() const {
1230    if (!isSingleSpacedVectorIndexed()) return false;
1231    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1232  }
1233
1234  bool isVecListThreeDByteIndexed() const {
1235    if (!isSingleSpacedVectorIndexed()) return false;
1236    return VectorList.Count == 3 && VectorList.LaneIndex <= 7;
1237  }
1238
1239  bool isVecListThreeDHWordIndexed() const {
1240    if (!isSingleSpacedVectorIndexed()) return false;
1241    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1242  }
1243
1244  bool isVecListThreeQWordIndexed() const {
1245    if (!isDoubleSpacedVectorIndexed()) return false;
1246    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1247  }
1248
1249  bool isVecListThreeQHWordIndexed() const {
1250    if (!isDoubleSpacedVectorIndexed()) return false;
1251    return VectorList.Count == 3 && VectorList.LaneIndex <= 3;
1252  }
1253
1254  bool isVecListThreeDWordIndexed() const {
1255    if (!isSingleSpacedVectorIndexed()) return false;
1256    return VectorList.Count == 3 && VectorList.LaneIndex <= 1;
1257  }
1258
1259  bool isVecListFourDByteIndexed() const {
1260    if (!isSingleSpacedVectorIndexed()) return false;
1261    return VectorList.Count == 4 && VectorList.LaneIndex <= 7;
1262  }
1263
1264  bool isVecListFourDHWordIndexed() const {
1265    if (!isSingleSpacedVectorIndexed()) return false;
1266    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1267  }
1268
1269  bool isVecListFourQWordIndexed() const {
1270    if (!isDoubleSpacedVectorIndexed()) return false;
1271    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1272  }
1273
1274  bool isVecListFourQHWordIndexed() const {
1275    if (!isDoubleSpacedVectorIndexed()) return false;
1276    return VectorList.Count == 4 && VectorList.LaneIndex <= 3;
1277  }
1278
1279  bool isVecListFourDWordIndexed() const {
1280    if (!isSingleSpacedVectorIndexed()) return false;
1281    return VectorList.Count == 4 && VectorList.LaneIndex <= 1;
1282  }
1283
1284  bool isVectorIndex8() const {
1285    if (Kind != k_VectorIndex) return false;
1286    return VectorIndex.Val < 8;
1287  }
1288  bool isVectorIndex16() const {
1289    if (Kind != k_VectorIndex) return false;
1290    return VectorIndex.Val < 4;
1291  }
1292  bool isVectorIndex32() const {
1293    if (Kind != k_VectorIndex) return false;
1294    return VectorIndex.Val < 2;
1295  }
1296
1297  bool isNEONi8splat() const {
1298    if (!isImm()) return false;
1299    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1300    // Must be a constant.
1301    if (!CE) return false;
1302    int64_t Value = CE->getValue();
1303    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
1304    // value.
1305    return Value >= 0 && Value < 256;
1306  }
1307
1308  bool isNEONi16splat() const {
1309    if (!isImm()) return false;
1310    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1311    // Must be a constant.
1312    if (!CE) return false;
1313    int64_t Value = CE->getValue();
1314    // i16 value in the range [0,255] or [0x0100, 0xff00]
1315    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1316  }
1317
1318  bool isNEONi32splat() const {
1319    if (!isImm()) return false;
1320    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1321    // Must be a constant.
1322    if (!CE) return false;
1323    int64_t Value = CE->getValue();
1324    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1325    return (Value >= 0 && Value < 256) ||
1326      (Value >= 0x0100 && Value <= 0xff00) ||
1327      (Value >= 0x010000 && Value <= 0xff0000) ||
1328      (Value >= 0x01000000 && Value <= 0xff000000);
1329  }
1330
1331  bool isNEONi32vmov() const {
1332    if (!isImm()) return false;
1333    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1334    // Must be a constant.
1335    if (!CE) return false;
1336    int64_t Value = CE->getValue();
1337    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1338    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1339    return (Value >= 0 && Value < 256) ||
1340      (Value >= 0x0100 && Value <= 0xff00) ||
1341      (Value >= 0x010000 && Value <= 0xff0000) ||
1342      (Value >= 0x01000000 && Value <= 0xff000000) ||
1343      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1344      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1345  }
1346  bool isNEONi32vmovNeg() const {
1347    if (!isImm()) return false;
1348    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1349    // Must be a constant.
1350    if (!CE) return false;
1351    int64_t Value = ~CE->getValue();
1352    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1353    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1354    return (Value >= 0 && Value < 256) ||
1355      (Value >= 0x0100 && Value <= 0xff00) ||
1356      (Value >= 0x010000 && Value <= 0xff0000) ||
1357      (Value >= 0x01000000 && Value <= 0xff000000) ||
1358      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1359      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1360  }
1361
1362  bool isNEONi64splat() const {
1363    if (!isImm()) return false;
1364    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1365    // Must be a constant.
1366    if (!CE) return false;
1367    uint64_t Value = CE->getValue();
1368    // i64 value with each byte being either 0 or 0xff.
1369    for (unsigned i = 0; i < 8; ++i)
1370      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1371    return true;
1372  }
1373
1374  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1375    // Add as immediates when possible.  Null MCExpr = 0.
1376    if (Expr == 0)
1377      Inst.addOperand(MCOperand::CreateImm(0));
1378    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1379      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1380    else
1381      Inst.addOperand(MCOperand::CreateExpr(Expr));
1382  }
1383
1384  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1385    assert(N == 2 && "Invalid number of operands!");
1386    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1387    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1388    Inst.addOperand(MCOperand::CreateReg(RegNum));
1389  }
1390
1391  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1392    assert(N == 1 && "Invalid number of operands!");
1393    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1394  }
1395
1396  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1397    assert(N == 1 && "Invalid number of operands!");
1398    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1399  }
1400
1401  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1402    assert(N == 1 && "Invalid number of operands!");
1403    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1404  }
1405
1406  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1407    assert(N == 1 && "Invalid number of operands!");
1408    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1409  }
1410
1411  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1412    assert(N == 1 && "Invalid number of operands!");
1413    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1414  }
1415
1416  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1417    assert(N == 1 && "Invalid number of operands!");
1418    Inst.addOperand(MCOperand::CreateReg(getReg()));
1419  }
1420
1421  void addRegOperands(MCInst &Inst, unsigned N) const {
1422    assert(N == 1 && "Invalid number of operands!");
1423    Inst.addOperand(MCOperand::CreateReg(getReg()));
1424  }
1425
1426  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1427    assert(N == 3 && "Invalid number of operands!");
1428    assert(isRegShiftedReg() &&
1429           "addRegShiftedRegOperands() on non RegShiftedReg!");
1430    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1431    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1432    Inst.addOperand(MCOperand::CreateImm(
1433      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1434  }
1435
1436  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1437    assert(N == 2 && "Invalid number of operands!");
1438    assert(isRegShiftedImm() &&
1439           "addRegShiftedImmOperands() on non RegShiftedImm!");
1440    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1441    Inst.addOperand(MCOperand::CreateImm(
1442      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1443  }
1444
1445  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1446    assert(N == 1 && "Invalid number of operands!");
1447    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1448                                         ShifterImm.Imm));
1449  }
1450
1451  void addRegListOperands(MCInst &Inst, unsigned N) const {
1452    assert(N == 1 && "Invalid number of operands!");
1453    const SmallVectorImpl<unsigned> &RegList = getRegList();
1454    for (SmallVectorImpl<unsigned>::const_iterator
1455           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1456      Inst.addOperand(MCOperand::CreateReg(*I));
1457  }
1458
1459  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1460    addRegListOperands(Inst, N);
1461  }
1462
1463  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1464    addRegListOperands(Inst, N);
1465  }
1466
1467  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1468    assert(N == 1 && "Invalid number of operands!");
1469    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1470    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1471  }
1472
1473  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1474    assert(N == 1 && "Invalid number of operands!");
1475    // Munge the lsb/width into a bitfield mask.
1476    unsigned lsb = Bitfield.LSB;
1477    unsigned width = Bitfield.Width;
1478    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1479    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1480                      (32 - (lsb + width)));
1481    Inst.addOperand(MCOperand::CreateImm(Mask));
1482  }
1483
1484  void addImmOperands(MCInst &Inst, unsigned N) const {
1485    assert(N == 1 && "Invalid number of operands!");
1486    addExpr(Inst, getImm());
1487  }
1488
1489  void addFBits16Operands(MCInst &Inst, unsigned N) const {
1490    assert(N == 1 && "Invalid number of operands!");
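        // The MCInst stores 16 minus the number of fraction bits given in the source.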
1491    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1492    Inst.addOperand(MCOperand::CreateImm(16 - CE->getValue()));
1493  }
1494
1495  void addFBits32Operands(MCInst &Inst, unsigned N) const {
1496    assert(N == 1 && "Invalid number of operands!");
1497    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1498    Inst.addOperand(MCOperand::CreateImm(32 - CE->getValue()));
1499  }
1500
1501  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1502    assert(N == 1 && "Invalid number of operands!");
1503    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1504    int Val = ARM_AM::getFP32Imm(APInt(32, CE->getValue()));
1505    Inst.addOperand(MCOperand::CreateImm(Val));
1506  }
1507
1508  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1509    assert(N == 1 && "Invalid number of operands!");
1510    // FIXME: We really want to scale the value here, but the LDRD/STRD
1511    // instructions don't encode operands that way yet.
1512    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1513    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1514  }
1515
1516  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1517    assert(N == 1 && "Invalid number of operands!");
1518    // The immediate is scaled by four in the encoding and is stored
1519    // in the MCInst as such. Lop off the low two bits here.
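        // e.g., an assembly operand of #1020 is stored as 255.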
1520    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1521    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1522  }
1523
1524  void addImm0_508s4NegOperands(MCInst &Inst, unsigned N) const {
1525    assert(N == 1 && "Invalid number of operands!");
1526    // The immediate is scaled by four in the encoding and is stored
1527    // in the MCInst as such. Lop off the low two bits here.
1528    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1529    Inst.addOperand(MCOperand::CreateImm(-(CE->getValue() / 4)));
1530  }
1531
1532  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1533    assert(N == 1 && "Invalid number of operands!");
1534    // The immediate is scaled by four in the encoding and is stored
1535    // in the MCInst as such. Lop off the low two bits here.
1536    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1537    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1538  }
1539
1540  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1541    assert(N == 1 && "Invalid number of operands!");
1542    // The constant encodes as the immediate-1, and we store in the instruction
1543    // the bits as encoded, so subtract off one here.
1544    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1545    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1546  }
1547
1548  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1549    assert(N == 1 && "Invalid number of operands!");
1550    // The constant encodes as the immediate-1, and we store in the instruction
1551    // the bits as encoded, so subtract off one here.
1552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1553    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1554  }
1555
1556  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1557    assert(N == 1 && "Invalid number of operands!");
1558    // The constant encodes as the immediate, except for 32, which encodes as
1559    // zero.
1560    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1561    unsigned Imm = CE->getValue();
1562    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1563  }
1564
1565  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1566    assert(N == 1 && "Invalid number of operands!");
1567    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1568    // the instruction as well.
1569    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1570    int Val = CE->getValue();
1571    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1572  }
1573
1574  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1575    assert(N == 1 && "Invalid number of operands!");
1576    // The operand is actually a t2_so_imm, but we have its bitwise
1577    // negation in the assembly source, so twiddle it here.
1578    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1579    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1580  }
1581
1582  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1583    assert(N == 1 && "Invalid number of operands!");
1584    // The operand is actually a t2_so_imm, but we have its
1585    // negation in the assembly source, so twiddle it here.
1586    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1587    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1588  }
1589
1590  void addImm0_4095NegOperands(MCInst &Inst, unsigned N) const {
1591    assert(N == 1 && "Invalid number of operands!");
1592    // The operand is actually an imm0_4095, but we have its
1593    // negation in the assembly source, so twiddle it here.
1594    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1595    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1596  }
1597
1598  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1599    assert(N == 1 && "Invalid number of operands!");
1600    // The operand is actually a so_imm, but we have its bitwise
1601    // negation in the assembly source, so twiddle it here.
1602    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1603    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1604  }
1605
1606  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1607    assert(N == 1 && "Invalid number of operands!");
1608    // The operand is actually a so_imm, but we have its
1609    // negation in the assembly source, so twiddle it here.
1610    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1611    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1612  }
1613
1614  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1615    assert(N == 1 && "Invalid number of operands!");
1616    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1617  }
1618
1619  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1620    assert(N == 1 && "Invalid number of operands!");
1621    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1622  }
1623
1624  void addMemPCRelImm12Operands(MCInst &Inst, unsigned N) const {
1625    assert(N == 1 && "Invalid number of operands!");
1626    int32_t Imm = Memory.OffsetImm->getValue();
1627    // FIXME: Handle #-0
1628    if (Imm == INT32_MIN) Imm = 0;
1629    Inst.addOperand(MCOperand::CreateImm(Imm));
1630  }
1631
1632  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1633    assert(N == 2 && "Invalid number of operands!");
1634    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1635    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1636  }
1637
1638  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1639    assert(N == 3 && "Invalid number of operands!");
1640    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1641    if (!Memory.OffsetRegNum) {
1642      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1643      // Special case for #-0
1644      if (Val == INT32_MIN) Val = 0;
1645      if (Val < 0) Val = -Val;
1646      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1647    } else {
1648      // For register offset, we encode the shift type and negation flag
1649      // here.
1650      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1651                              Memory.ShiftImm, Memory.ShiftType);
1652    }
1653    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1654    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1655    Inst.addOperand(MCOperand::CreateImm(Val));
1656  }
1657
1658  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1659    assert(N == 2 && "Invalid number of operands!");
1660    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1661    assert(CE && "non-constant AM2OffsetImm operand!");
1662    int32_t Val = CE->getValue();
1663    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1664    // Special case for #-0
1665    if (Val == INT32_MIN) Val = 0;
1666    if (Val < 0) Val = -Val;
1667    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
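        // The immediate form has no offset register, so emit a zero register placeholder.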
1668    Inst.addOperand(MCOperand::CreateReg(0));
1669    Inst.addOperand(MCOperand::CreateImm(Val));
1670  }
1671
1672  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1673    assert(N == 3 && "Invalid number of operands!");
1674    // If we have an immediate that's not a constant, treat it as a label
1675    // reference needing a fixup. If it is a constant, it's something else
1676    // and we reject it.
1677    if (isImm()) {
1678      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1679      Inst.addOperand(MCOperand::CreateReg(0));
1680      Inst.addOperand(MCOperand::CreateImm(0));
1681      return;
1682    }
1683
1684    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1685    if (!Memory.OffsetRegNum) {
1686      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1687      // Special case for #-0
1688      if (Val == INT32_MIN) Val = 0;
1689      if (Val < 0) Val = -Val;
1690      Val = ARM_AM::getAM3Opc(AddSub, Val);
1691    } else {
1692      // For register offset, we encode the shift type and negation flag
1693      // here.
1694      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1695    }
1696    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1697    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1698    Inst.addOperand(MCOperand::CreateImm(Val));
1699  }
1700
1701  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1702    assert(N == 2 && "Invalid number of operands!");
1703    if (Kind == k_PostIndexRegister) {
1704      int32_t Val =
1705        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1706      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1707      Inst.addOperand(MCOperand::CreateImm(Val));
1708      return;
1709    }
1710
1711    // Constant offset.
1712    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1713    int32_t Val = CE->getValue();
1714    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1715    // Special case for #-0
1716    if (Val == INT32_MIN) Val = 0;
1717    if (Val < 0) Val = -Val;
1718    Val = ARM_AM::getAM3Opc(AddSub, Val);
1719    Inst.addOperand(MCOperand::CreateReg(0));
1720    Inst.addOperand(MCOperand::CreateImm(Val));
1721  }
1722
1723  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1724    assert(N == 2 && "Invalid number of operands!");
1725    // If we have an immediate that's not a constant, treat it as a label
1726    // reference needing a fixup. If it is a constant, it's something else
1727    // and we reject it.
1728    if (isImm()) {
1729      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1730      Inst.addOperand(MCOperand::CreateImm(0));
1731      return;
1732    }
1733
1734    // The lower two bits are always zero and as such are not encoded.
1735    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1736    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1737    // Special case for #-0
1738    if (Val == INT32_MIN) Val = 0;
1739    if (Val < 0) Val = -Val;
1740    Val = ARM_AM::getAM5Opc(AddSub, Val);
1741    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1742    Inst.addOperand(MCOperand::CreateImm(Val));
1743  }
1744
1745  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1746    assert(N == 2 && "Invalid number of operands!");
1747    // If we have an immediate that's not a constant, treat it as a label
1748    // reference needing a fixup. If it is a constant, it's something else
1749    // and we reject it.
1750    if (isImm()) {
1751      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1752      Inst.addOperand(MCOperand::CreateImm(0));
1753      return;
1754    }
1755
1756    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1757    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1758    Inst.addOperand(MCOperand::CreateImm(Val));
1759  }
1760
1761  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1762    assert(N == 2 && "Invalid number of operands!");
1763    // The lower two bits are always zero and as such are not encoded.
1764    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1765    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1766    Inst.addOperand(MCOperand::CreateImm(Val));
1767  }
1768
1769  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1770    assert(N == 2 && "Invalid number of operands!");
1771    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1772    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1773    Inst.addOperand(MCOperand::CreateImm(Val));
1774  }
1775
1776  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1777    addMemImm8OffsetOperands(Inst, N);
1778  }
1779
1780  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1781    addMemImm8OffsetOperands(Inst, N);
1782  }
1783
1784  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1785    assert(N == 2 && "Invalid number of operands!");
1786    // If this is an immediate, it's a label reference.
1787    if (isImm()) {
1788      addExpr(Inst, getImm());
1789      Inst.addOperand(MCOperand::CreateImm(0));
1790      return;
1791    }
1792
1793    // Otherwise, it's a normal memory reg+offset.
1794    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1795    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1796    Inst.addOperand(MCOperand::CreateImm(Val));
1797  }
1798
1799  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1800    assert(N == 2 && "Invalid number of operands!");
1801    // If this is an immediate, it's a label reference.
1802    if (isImm()) {
1803      addExpr(Inst, getImm());
1804      Inst.addOperand(MCOperand::CreateImm(0));
1805      return;
1806    }
1807
1808    // Otherwise, it's a normal memory reg+offset.
1809    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1810    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1811    Inst.addOperand(MCOperand::CreateImm(Val));
1812  }
1813
1814  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1815    assert(N == 2 && "Invalid number of operands!");
1816    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1817    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1818  }
1819
1820  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1821    assert(N == 2 && "Invalid number of operands!");
1822    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1823    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1824  }
1825
1826  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1827    assert(N == 3 && "Invalid number of operands!");
1828    unsigned Val =
1829      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1830                        Memory.ShiftImm, Memory.ShiftType);
1831    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1832    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1833    Inst.addOperand(MCOperand::CreateImm(Val));
1834  }
1835
1836  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1837    assert(N == 3 && "Invalid number of operands!");
1838    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1839    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1840    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1841  }
1842
1843  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1844    assert(N == 2 && "Invalid number of operands!");
1845    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1846    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1847  }
1848
1849  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1850    assert(N == 2 && "Invalid number of operands!");
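        // The offset is stored word-scaled, i.e. divided by 4.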
1851    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1852    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1853    Inst.addOperand(MCOperand::CreateImm(Val));
1854  }
1855
1856  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1857    assert(N == 2 && "Invalid number of operands!");
1858    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1859    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1860    Inst.addOperand(MCOperand::CreateImm(Val));
1861  }
1862
1863  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1864    assert(N == 2 && "Invalid number of operands!");
1865    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1866    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1867    Inst.addOperand(MCOperand::CreateImm(Val));
1868  }
1869
1870  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1871    assert(N == 2 && "Invalid number of operands!");
1872    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1873    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1874    Inst.addOperand(MCOperand::CreateImm(Val));
1875  }
1876
1877  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1878    assert(N == 1 && "Invalid number of operands!");
1879    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1880    assert(CE && "non-constant post-idx-imm8 operand!");
1881    int Imm = CE->getValue();
1882    bool isAdd = Imm >= 0;
1883    if (Imm == INT32_MIN) Imm = 0;
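        // The low 8 bits hold the magnitude; bit 8 is the add/sub flag.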
1884    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1885    Inst.addOperand(MCOperand::CreateImm(Imm));
1886  }
1887
1888  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1889    assert(N == 1 && "Invalid number of operands!");
1890    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1891    assert(CE && "non-constant post-idx-imm8s4 operand!");
1892    int Imm = CE->getValue();
1893    bool isAdd = Imm >= 0;
1894    if (Imm == INT32_MIN) Imm = 0;
1895    // Immediate is scaled by 4.
1896    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1897    Inst.addOperand(MCOperand::CreateImm(Imm));
1898  }
1899
1900  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1901    assert(N == 2 && "Invalid number of operands!");
1902    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1903    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1904  }
1905
1906  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1907    assert(N == 2 && "Invalid number of operands!");
1908    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1909    // The sign, shift type, and shift amount are encoded in a single operand
1910    // using the AM2 encoding helpers.
1911    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1912    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1913                                     PostIdxReg.ShiftTy);
1914    Inst.addOperand(MCOperand::CreateImm(Imm));
1915  }
1916
1917  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1918    assert(N == 1 && "Invalid number of operands!");
1919    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1920  }
1921
1922  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1923    assert(N == 1 && "Invalid number of operands!");
1924    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1925  }
1926
1927  void addVecListOperands(MCInst &Inst, unsigned N) const {
1928    assert(N == 1 && "Invalid number of operands!");
1929    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1930  }
1931
1932  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1933    assert(N == 2 && "Invalid number of operands!");
1934    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1935    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1936  }
1937
1938  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1939    assert(N == 1 && "Invalid number of operands!");
1940    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1941  }
1942
1943  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1944    assert(N == 1 && "Invalid number of operands!");
1945    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1946  }
1947
1948  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1949    assert(N == 1 && "Invalid number of operands!");
1950    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1951  }
1952
1953  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1954    assert(N == 1 && "Invalid number of operands!");
1955    // The immediate encodes the type of constant as well as the value.
1956    // Mask in that this is an i8 splat.
1957    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1958    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1959  }
1960
1961  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1962    assert(N == 1 && "Invalid number of operands!");
1963    // The immediate encodes the type of constant as well as the value.
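        // Values of 256 or more keep only their upper byte and use the upper-byte
        // form; smaller values use the low-byte form.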
1964    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1965    unsigned Value = CE->getValue();
1966    if (Value >= 256)
1967      Value = (Value >> 8) | 0xa00;
1968    else
1969      Value |= 0x800;
1970    Inst.addOperand(MCOperand::CreateImm(Value));
1971  }
1972
1973  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1974    assert(N == 1 && "Invalid number of operands!");
1975    // The immediate encodes the type of constant as well as the value.
1976    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1977    unsigned Value = CE->getValue();
1978    if (Value >= 256 && Value <= 0xff00)
1979      Value = (Value >> 8) | 0x200;
1980    else if (Value > 0xffff && Value <= 0xff0000)
1981      Value = (Value >> 16) | 0x400;
1982    else if (Value > 0xffffff)
1983      Value = (Value >> 24) | 0x600;
1984    Inst.addOperand(MCOperand::CreateImm(Value));
1985  }
1986
1987  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1988    assert(N == 1 && "Invalid number of operands!");
1989    // The immediate encodes the type of constant as well as the value.
1990    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1991    unsigned Value = CE->getValue();
1992    if (Value >= 256 && Value <= 0xffff)
1993      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1994    else if (Value > 0xffff && Value <= 0xffffff)
1995      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1996    else if (Value > 0xffffff)
1997      Value = (Value >> 24) | 0x600;
1998    Inst.addOperand(MCOperand::CreateImm(Value));
1999  }
2000
2001  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
2002    assert(N == 1 && "Invalid number of operands!");
2003    // The immediate encodes the type of constant as well as the value.
2004    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2005    unsigned Value = ~CE->getValue();
2006    if (Value >= 256 && Value <= 0xffff)
2007      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
2008    else if (Value > 0xffff && Value <= 0xffffff)
2009      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
2010    else if (Value > 0xffffff)
2011      Value = (Value >> 24) | 0x600;
2012    Inst.addOperand(MCOperand::CreateImm(Value));
2013  }
2014
2015  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
2016    assert(N == 1 && "Invalid number of operands!");
2017    // The immediate encodes the type of constant as well as the value.
2018    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
2019    uint64_t Value = CE->getValue();
2020    unsigned Imm = 0;
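        // Each byte of a valid i64 splat immediate is either 0x00 or 0xff, so one
        // bit per byte is enough to represent the value.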
2021    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
2022      Imm |= (Value & 1) << i;
2023    }
2024    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
2025  }
2026
2027  virtual void print(raw_ostream &OS) const;
2028
2029  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
2030    ARMOperand *Op = new ARMOperand(k_ITCondMask);
2031    Op->ITMask.Mask = Mask;
2032    Op->StartLoc = S;
2033    Op->EndLoc = S;
2034    return Op;
2035  }
2036
2037  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
2038    ARMOperand *Op = new ARMOperand(k_CondCode);
2039    Op->CC.Val = CC;
2040    Op->StartLoc = S;
2041    Op->EndLoc = S;
2042    return Op;
2043  }
2044
2045  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
2046    ARMOperand *Op = new ARMOperand(k_CoprocNum);
2047    Op->Cop.Val = CopVal;
2048    Op->StartLoc = S;
2049    Op->EndLoc = S;
2050    return Op;
2051  }
2052
2053  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
2054    ARMOperand *Op = new ARMOperand(k_CoprocReg);
2055    Op->Cop.Val = CopVal;
2056    Op->StartLoc = S;
2057    Op->EndLoc = S;
2058    return Op;
2059  }
2060
2061  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
2062    ARMOperand *Op = new ARMOperand(k_CoprocOption);
2063    Op->Cop.Val = Val;
2064    Op->StartLoc = S;
2065    Op->EndLoc = E;
2066    return Op;
2067  }
2068
2069  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
2070    ARMOperand *Op = new ARMOperand(k_CCOut);
2071    Op->Reg.RegNum = RegNum;
2072    Op->StartLoc = S;
2073    Op->EndLoc = S;
2074    return Op;
2075  }
2076
2077  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
2078    ARMOperand *Op = new ARMOperand(k_Token);
2079    Op->Tok.Data = Str.data();
2080    Op->Tok.Length = Str.size();
2081    Op->StartLoc = S;
2082    Op->EndLoc = S;
2083    return Op;
2084  }
2085
2086  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
2087    ARMOperand *Op = new ARMOperand(k_Register);
2088    Op->Reg.RegNum = RegNum;
2089    Op->StartLoc = S;
2090    Op->EndLoc = E;
2091    return Op;
2092  }
2093
2094  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
2095                                           unsigned SrcReg,
2096                                           unsigned ShiftReg,
2097                                           unsigned ShiftImm,
2098                                           SMLoc S, SMLoc E) {
2099    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
2100    Op->RegShiftedReg.ShiftTy = ShTy;
2101    Op->RegShiftedReg.SrcReg = SrcReg;
2102    Op->RegShiftedReg.ShiftReg = ShiftReg;
2103    Op->RegShiftedReg.ShiftImm = ShiftImm;
2104    Op->StartLoc = S;
2105    Op->EndLoc = E;
2106    return Op;
2107  }
2108
2109  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
2110                                            unsigned SrcReg,
2111                                            unsigned ShiftImm,
2112                                            SMLoc S, SMLoc E) {
2113    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
2114    Op->RegShiftedImm.ShiftTy = ShTy;
2115    Op->RegShiftedImm.SrcReg = SrcReg;
2116    Op->RegShiftedImm.ShiftImm = ShiftImm;
2117    Op->StartLoc = S;
2118    Op->EndLoc = E;
2119    return Op;
2120  }
2121
2122  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
2123                                   SMLoc S, SMLoc E) {
2124    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
2125    Op->ShifterImm.isASR = isASR;
2126    Op->ShifterImm.Imm = Imm;
2127    Op->StartLoc = S;
2128    Op->EndLoc = E;
2129    return Op;
2130  }
2131
2132  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
2133    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
2134    Op->RotImm.Imm = Imm;
2135    Op->StartLoc = S;
2136    Op->EndLoc = E;
2137    return Op;
2138  }
2139
2140  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
2141                                    SMLoc S, SMLoc E) {
2142    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
2143    Op->Bitfield.LSB = LSB;
2144    Op->Bitfield.Width = Width;
2145    Op->StartLoc = S;
2146    Op->EndLoc = E;
2147    return Op;
2148  }
2149
2150  static ARMOperand *
2151  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
2152                SMLoc StartLoc, SMLoc EndLoc) {
2153    KindTy Kind = k_RegisterList;
2154
2155    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
2156      Kind = k_DPRRegisterList;
2157    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2158             contains(Regs.front().first))
2159      Kind = k_SPRRegisterList;
2160
2161    ARMOperand *Op = new ARMOperand(Kind);
2162    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2163           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2164      Op->Registers.push_back(I->first);
2165    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2166    Op->StartLoc = StartLoc;
2167    Op->EndLoc = EndLoc;
2168    return Op;
2169  }
2170
2171  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2172                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2173    ARMOperand *Op = new ARMOperand(k_VectorList);
2174    Op->VectorList.RegNum = RegNum;
2175    Op->VectorList.Count = Count;
2176    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2177    Op->StartLoc = S;
2178    Op->EndLoc = E;
2179    return Op;
2180  }
2181
2182  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2183                                              bool isDoubleSpaced,
2184                                              SMLoc S, SMLoc E) {
2185    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2186    Op->VectorList.RegNum = RegNum;
2187    Op->VectorList.Count = Count;
2188    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2189    Op->StartLoc = S;
2190    Op->EndLoc = E;
2191    return Op;
2192  }
2193
2194  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2195                                             unsigned Index,
2196                                             bool isDoubleSpaced,
2197                                             SMLoc S, SMLoc E) {
2198    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2199    Op->VectorList.RegNum = RegNum;
2200    Op->VectorList.Count = Count;
2201    Op->VectorList.LaneIndex = Index;
2202    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2203    Op->StartLoc = S;
2204    Op->EndLoc = E;
2205    return Op;
2206  }
2207
2208  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2209                                       MCContext &Ctx) {
2210    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2211    Op->VectorIndex.Val = Idx;
2212    Op->StartLoc = S;
2213    Op->EndLoc = E;
2214    return Op;
2215  }
2216
2217  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2218    ARMOperand *Op = new ARMOperand(k_Immediate);
2219    Op->Imm.Val = Val;
2220    Op->StartLoc = S;
2221    Op->EndLoc = E;
2222    return Op;
2223  }
2224
2225  static ARMOperand *CreateMem(unsigned BaseRegNum,
2226                               const MCConstantExpr *OffsetImm,
2227                               unsigned OffsetRegNum,
2228                               ARM_AM::ShiftOpc ShiftType,
2229                               unsigned ShiftImm,
2230                               unsigned Alignment,
2231                               bool isNegative,
2232                               SMLoc S, SMLoc E) {
2233    ARMOperand *Op = new ARMOperand(k_Memory);
2234    Op->Memory.BaseRegNum = BaseRegNum;
2235    Op->Memory.OffsetImm = OffsetImm;
2236    Op->Memory.OffsetRegNum = OffsetRegNum;
2237    Op->Memory.ShiftType = ShiftType;
2238    Op->Memory.ShiftImm = ShiftImm;
2239    Op->Memory.Alignment = Alignment;
2240    Op->Memory.isNegative = isNegative;
2241    Op->StartLoc = S;
2242    Op->EndLoc = E;
2243    return Op;
2244  }
2245
2246  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2247                                      ARM_AM::ShiftOpc ShiftTy,
2248                                      unsigned ShiftImm,
2249                                      SMLoc S, SMLoc E) {
2250    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2251    Op->PostIdxReg.RegNum = RegNum;
2252    Op->PostIdxReg.isAdd = isAdd;
2253    Op->PostIdxReg.ShiftTy = ShiftTy;
2254    Op->PostIdxReg.ShiftImm = ShiftImm;
2255    Op->StartLoc = S;
2256    Op->EndLoc = E;
2257    return Op;
2258  }
2259
2260  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2261    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2262    Op->MBOpt.Val = Opt;
2263    Op->StartLoc = S;
2264    Op->EndLoc = S;
2265    return Op;
2266  }
2267
2268  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2269    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2270    Op->IFlags.Val = IFlags;
2271    Op->StartLoc = S;
2272    Op->EndLoc = S;
2273    return Op;
2274  }
2275
2276  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2277    ARMOperand *Op = new ARMOperand(k_MSRMask);
2278    Op->MMask.Val = MMask;
2279    Op->StartLoc = S;
2280    Op->EndLoc = S;
2281    return Op;
2282  }
2283};
2284
2285} // end anonymous namespace.
2286
2287void ARMOperand::print(raw_ostream &OS) const {
2288  switch (Kind) {
2289  case k_CondCode:
2290    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2291    break;
2292  case k_CCOut:
2293    OS << "<ccout " << getReg() << ">";
2294    break;
2295  case k_ITCondMask: {
2296    static const char *MaskStr[] = {
2297      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2298      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2299    };
2300    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2301    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2302    break;
2303  }
2304  case k_CoprocNum:
2305    OS << "<coprocessor number: " << getCoproc() << ">";
2306    break;
2307  case k_CoprocReg:
2308    OS << "<coprocessor register: " << getCoproc() << ">";
2309    break;
2310  case k_CoprocOption:
2311    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2312    break;
2313  case k_MSRMask:
2314    OS << "<mask: " << getMSRMask() << ">";
2315    break;
2316  case k_Immediate:
2317    getImm()->print(OS);
2318    break;
2319  case k_MemBarrierOpt:
2320    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2321    break;
2322  case k_Memory:
2323    OS << "<memory "
2324       << " base:" << Memory.BaseRegNum;
2325    OS << ">";
2326    break;
2327  case k_PostIndexRegister:
2328    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2329       << PostIdxReg.RegNum;
2330    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2331      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2332         << PostIdxReg.ShiftImm;
2333    OS << ">";
2334    break;
2335  case k_ProcIFlags: {
2336    OS << "<ARM_PROC::";
2337    unsigned IFlags = getProcIFlags();
2338    for (int i=2; i >= 0; --i)
2339      if (IFlags & (1 << i))
2340        OS << ARM_PROC::IFlagsToString(1 << i);
2341    OS << ">";
2342    break;
2343  }
2344  case k_Register:
2345    OS << "<register " << getReg() << ">";
2346    break;
2347  case k_ShifterImmediate:
2348    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2349       << " #" << ShifterImm.Imm << ">";
2350    break;
2351  case k_ShiftedRegister:
2352    OS << "<so_reg_reg "
2353       << RegShiftedReg.SrcReg << " "
2354       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2355       << " " << RegShiftedReg.ShiftReg << ">";
2356    break;
2357  case k_ShiftedImmediate:
2358    OS << "<so_reg_imm "
2359       << RegShiftedImm.SrcReg << " "
2360       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2361       << " #" << RegShiftedImm.ShiftImm << ">";
2362    break;
2363  case k_RotateImmediate:
2364    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2365    break;
2366  case k_BitfieldDescriptor:
2367    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2368       << ", width: " << Bitfield.Width << ">";
2369    break;
2370  case k_RegisterList:
2371  case k_DPRRegisterList:
2372  case k_SPRRegisterList: {
2373    OS << "<register_list ";
2374
2375    const SmallVectorImpl<unsigned> &RegList = getRegList();
2376    for (SmallVectorImpl<unsigned>::const_iterator
2377           I = RegList.begin(), E = RegList.end(); I != E; ) {
2378      OS << *I;
2379      if (++I < E) OS << ", ";
2380    }
2381
2382    OS << ">";
2383    break;
2384  }
2385  case k_VectorList:
2386    OS << "<vector_list " << VectorList.Count << " * "
2387       << VectorList.RegNum << ">";
2388    break;
2389  case k_VectorListAllLanes:
2390    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2391       << VectorList.RegNum << ">";
2392    break;
2393  case k_VectorListIndexed:
2394    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2395       << VectorList.Count << " * " << VectorList.RegNum << ">";
2396    break;
2397  case k_Token:
2398    OS << "'" << getToken() << "'";
2399    break;
2400  case k_VectorIndex:
2401    OS << "<vectorindex " << getVectorIndex() << ">";
2402    break;
2403  }
2404}
2405
2406/// @name Auto-generated Match Functions
2407/// {
2408
2409static unsigned MatchRegisterName(StringRef Name);
2410
2411/// }
2412
2413bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2414                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2415  StartLoc = Parser.getTok().getLoc();
2416  RegNo = tryParseRegister();
2417  EndLoc = Parser.getTok().getLoc();
2418
2419  return (RegNo == (unsigned)-1);
2420}
2421
2422/// Try to parse a register name.  The token must be an Identifier when called,
2423/// and if it is a register name the token is eaten and the register number is
2424/// returned.  Otherwise return -1.
2425///
2426int ARMAsmParser::tryParseRegister() {
2427  const AsmToken &Tok = Parser.getTok();
2428  if (Tok.isNot(AsmToken::Identifier)) return -1;
2429
2430  std::string lowerCase = Tok.getString().lower();
2431  unsigned RegNum = MatchRegisterName(lowerCase);
2432  if (!RegNum) {
2433    RegNum = StringSwitch<unsigned>(lowerCase)
2434      .Case("r13", ARM::SP)
2435      .Case("r14", ARM::LR)
2436      .Case("r15", ARM::PC)
2437      .Case("ip", ARM::R12)
2438      // Additional register name aliases for 'gas' compatibility.
2439      .Case("a1", ARM::R0)
2440      .Case("a2", ARM::R1)
2441      .Case("a3", ARM::R2)
2442      .Case("a4", ARM::R3)
2443      .Case("v1", ARM::R4)
2444      .Case("v2", ARM::R5)
2445      .Case("v3", ARM::R6)
2446      .Case("v4", ARM::R7)
2447      .Case("v5", ARM::R8)
2448      .Case("v6", ARM::R9)
2449      .Case("v7", ARM::R10)
2450      .Case("v8", ARM::R11)
2451      .Case("sb", ARM::R9)
2452      .Case("sl", ARM::R10)
2453      .Case("fp", ARM::R11)
2454      .Default(0);
2455  }
2456  if (!RegNum) {
2457    // Check for aliases registered via .req. Canonicalize to lower case.
2458    // That's more consistent since register names are case insensitive, and
2459    // it's how the original entry was passed in from MC/MCParser/AsmParser.
2460    StringMap<unsigned>::const_iterator Entry = RegisterReqs.find(lowerCase);
2461    // If no match, return failure.
2462    if (Entry == RegisterReqs.end())
2463      return -1;
2464    Parser.Lex(); // Eat identifier token.
2465    return Entry->getValue();
2466  }
2467
2468  Parser.Lex(); // Eat identifier token.
2469
2470  return RegNum;
2471}
2472
2473// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2474// If a recoverable error occurs, return 1. If an irrecoverable error
2475// occurs, return -1. An irrecoverable error is one where tokens have been
2476// consumed in the process of trying to parse the shifter (i.e., when it is
2477// indeed a shifter operand, but malformed).
2478int ARMAsmParser::tryParseShiftRegister(
2479                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2480  SMLoc S = Parser.getTok().getLoc();
2481  const AsmToken &Tok = Parser.getTok();
2482  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2483
2484  std::string lowerCase = Tok.getString().lower();
2485  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2486      .Case("asl", ARM_AM::lsl)
2487      .Case("lsl", ARM_AM::lsl)
2488      .Case("lsr", ARM_AM::lsr)
2489      .Case("asr", ARM_AM::asr)
2490      .Case("ror", ARM_AM::ror)
2491      .Case("rrx", ARM_AM::rrx)
2492      .Default(ARM_AM::no_shift);
2493
2494  if (ShiftTy == ARM_AM::no_shift)
2495    return 1;
2496
2497  Parser.Lex(); // Eat the operator.
2498
2499  // The source register for the shift has already been added to the
2500  // operand list, so we need to pop it off and combine it into the shifted
2501  // register operand instead.
2502  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2503  if (!PrevOp->isReg())
2504    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2505  int SrcReg = PrevOp->getReg();
2506  int64_t Imm = 0;
2507  int ShiftReg = 0;
2508  if (ShiftTy == ARM_AM::rrx) {
2509    // RRX doesn't have an explicit shift amount. The encoder expects
2510    // the shift register to be the same as the source register. Seems odd,
2511    // but OK.
2512    ShiftReg = SrcReg;
2513  } else {
2514    // Figure out if this is shifted by a constant or a register (for non-RRX).
2515    if (Parser.getTok().is(AsmToken::Hash) ||
2516        Parser.getTok().is(AsmToken::Dollar)) {
2517      Parser.Lex(); // Eat hash.
2518      SMLoc ImmLoc = Parser.getTok().getLoc();
2519      const MCExpr *ShiftExpr = 0;
2520      if (getParser().ParseExpression(ShiftExpr)) {
2521        Error(ImmLoc, "invalid immediate shift value");
2522        return -1;
2523      }
2524      // The expression must be evaluatable as an immediate.
2525      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2526      if (!CE) {
2527        Error(ImmLoc, "invalid immediate shift value");
2528        return -1;
2529      }
2530      // Range check the immediate.
2531      // lsl, ror: 0 <= imm <= 31
2532      // lsr, asr: 0 <= imm <= 32
2533      Imm = CE->getValue();
2534      if (Imm < 0 ||
2535          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2536          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2537        Error(ImmLoc, "immediate shift value out of range");
2538        return -1;
2539      }
2540      // Shift by zero is a nop. Always send it through as lsl.
2541      // ('as' compatibility)
2542      if (Imm == 0)
2543        ShiftTy = ARM_AM::lsl;
2544    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2545      ShiftReg = tryParseRegister();
2546      SMLoc L = Parser.getTok().getLoc();
2547      if (ShiftReg == -1) {
2548        Error(L, "expected immediate or register in shift operand");
2549        return -1;
2550      }
2551    } else {
2552      Error(Parser.getTok().getLoc(),
2553            "expected immediate or register in shift operand");
2554      return -1;
2555    }
2556  }
2557
2558  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2559    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2560                                                         ShiftReg, Imm,
2561                                               S, Parser.getTok().getLoc()));
2562  else
2563    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2564                                               S, Parser.getTok().getLoc()));
2565
2566  return 0;
2567}
2568
2569
2570/// Try to parse a register name.  The token must be an Identifier when called.
2571/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2572/// if there is a "writeback". Returns 'true' if it's not a register.
2573///
2574/// TODO this is likely to change to allow different register types and/or to
2575/// parse for a specific register type.
2576bool ARMAsmParser::
2577tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2578  SMLoc S = Parser.getTok().getLoc();
2579  int RegNo = tryParseRegister();
2580  if (RegNo == -1)
2581    return true;
2582
2583  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2584
2585  const AsmToken &ExclaimTok = Parser.getTok();
2586  if (ExclaimTok.is(AsmToken::Exclaim)) {
2587    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2588                                               ExclaimTok.getLoc()));
2589    Parser.Lex(); // Eat exclaim token
2590    return false;
2591  }
2592
2593  // Also check for an index operand. This is only legal for vector registers,
2594  // but that'll get caught OK in operand matching, so we don't need to
2595  // explicitly filter everything else out here.
2596  if (Parser.getTok().is(AsmToken::LBrac)) {
2597    SMLoc SIdx = Parser.getTok().getLoc();
2598    Parser.Lex(); // Eat left bracket token.
2599
2600    const MCExpr *ImmVal;
2601    if (getParser().ParseExpression(ImmVal))
2602      return true;
2603    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2604    if (!MCE)
2605      return TokError("immediate value expected for vector index");
2606
2607    SMLoc E = Parser.getTok().getLoc();
2608    if (Parser.getTok().isNot(AsmToken::RBrac))
2609      return Error(E, "']' expected");
2610
2611    Parser.Lex(); // Eat right bracket token.
2612
2613    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2614                                                     SIdx, E,
2615                                                     getContext()));
2616  }
2617
2618  return false;
2619}
2620
2621/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2622/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2623/// "c5", ...
2624static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2625  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2626  // but efficient.
2627  switch (Name.size()) {
2628  default: return -1;
2629  case 2:
2630    if (Name[0] != CoprocOp)
2631      return -1;
2632    switch (Name[1]) {
2633    default:  return -1;
2634    case '0': return 0;
2635    case '1': return 1;
2636    case '2': return 2;
2637    case '3': return 3;
2638    case '4': return 4;
2639    case '5': return 5;
2640    case '6': return 6;
2641    case '7': return 7;
2642    case '8': return 8;
2643    case '9': return 9;
2644    }
2645  case 3:
2646    if (Name[0] != CoprocOp || Name[1] != '1')
2647      return -1;
2648    switch (Name[2]) {
2649    default:  return -1;
2650    case '0': return 10;
2651    case '1': return 11;
2652    case '2': return 12;
2653    case '3': return 13;
2654    case '4': return 14;
2655    case '5': return 15;
2656    }
2657  }
2658}
2659
2660/// parseITCondCode - Try to parse a condition code for an IT instruction.
2661ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2662parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2663  SMLoc S = Parser.getTok().getLoc();
2664  const AsmToken &Tok = Parser.getTok();
2665  if (!Tok.is(AsmToken::Identifier))
2666    return MatchOperand_NoMatch;
2667  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2668    .Case("eq", ARMCC::EQ)
2669    .Case("ne", ARMCC::NE)
2670    .Case("hs", ARMCC::HS)
2671    .Case("cs", ARMCC::HS)
2672    .Case("lo", ARMCC::LO)
2673    .Case("cc", ARMCC::LO)
2674    .Case("mi", ARMCC::MI)
2675    .Case("pl", ARMCC::PL)
2676    .Case("vs", ARMCC::VS)
2677    .Case("vc", ARMCC::VC)
2678    .Case("hi", ARMCC::HI)
2679    .Case("ls", ARMCC::LS)
2680    .Case("ge", ARMCC::GE)
2681    .Case("lt", ARMCC::LT)
2682    .Case("gt", ARMCC::GT)
2683    .Case("le", ARMCC::LE)
2684    .Case("al", ARMCC::AL)
2685    .Default(~0U);
2686  if (CC == ~0U)
2687    return MatchOperand_NoMatch;
2688  Parser.Lex(); // Eat the token.
2689
2690  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2691
2692  return MatchOperand_Success;
2693}
2694
2695/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2696/// token must be an Identifier when called, and if it is a coprocessor
2697/// number, the token is eaten and the operand is added to the operand list.
2698ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2699parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2700  SMLoc S = Parser.getTok().getLoc();
2701  const AsmToken &Tok = Parser.getTok();
2702  if (Tok.isNot(AsmToken::Identifier))
2703    return MatchOperand_NoMatch;
2704
2705  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2706  if (Num == -1)
2707    return MatchOperand_NoMatch;
2708
2709  Parser.Lex(); // Eat identifier token.
2710  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2711  return MatchOperand_Success;
2712}
2713
2714/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2715/// token must be an Identifier when called, and if it is a coprocessor
2716/// register, the token is eaten and the operand is added to the operand list.
2717ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2718parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2719  SMLoc S = Parser.getTok().getLoc();
2720  const AsmToken &Tok = Parser.getTok();
2721  if (Tok.isNot(AsmToken::Identifier))
2722    return MatchOperand_NoMatch;
2723
2724  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2725  if (Reg == -1)
2726    return MatchOperand_NoMatch;
2727
2728  Parser.Lex(); // Eat identifier token.
2729  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2730  return MatchOperand_Success;
2731}
2732
2733/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2734/// coproc_option : '{' imm0_255 '}'
2735ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2736parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2737  SMLoc S = Parser.getTok().getLoc();
2738
2739  // If this isn't a '{', this isn't a coprocessor immediate operand.
2740  if (Parser.getTok().isNot(AsmToken::LCurly))
2741    return MatchOperand_NoMatch;
2742  Parser.Lex(); // Eat the '{'
2743
2744  const MCExpr *Expr;
2745  SMLoc Loc = Parser.getTok().getLoc();
2746  if (getParser().ParseExpression(Expr)) {
2747    Error(Loc, "illegal expression");
2748    return MatchOperand_ParseFail;
2749  }
2750  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2751  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2752    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2753    return MatchOperand_ParseFail;
2754  }
2755  int Val = CE->getValue();
2756
2757  // Check for and consume the closing '}'
2758  if (Parser.getTok().isNot(AsmToken::RCurly))
2759    return MatchOperand_ParseFail;
2760  SMLoc E = Parser.getTok().getLoc();
2761  Parser.Lex(); // Eat the '}'
2762
2763  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2764  return MatchOperand_Success;
2765}
2766
2767// For register list parsing, we need to map from raw GPR register numbering
2768// to the enumeration values. The enumeration values aren't sorted by
2769// register number due to our using "sp", "lr" and "pc" as canonical names.
2770static unsigned getNextRegister(unsigned Reg) {
2771  // If this is a GPR, we need to do it manually, otherwise we can rely
2772  // on the sort ordering of the enumeration since the other reg-classes
2773  // are sane.
2774  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2775    return Reg + 1;
2776  switch(Reg) {
2777  default: llvm_unreachable("Invalid GPR number!");
2778  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2779  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2780  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2781  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2782  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2783  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2784  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2785  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2786  }
2787}
2788
2789// Return the low-subreg of a given Q register.
2790static unsigned getDRegFromQReg(unsigned QReg) {
2791  switch (QReg) {
2792  default: llvm_unreachable("expected a Q register!");
2793  case ARM::Q0:  return ARM::D0;
2794  case ARM::Q1:  return ARM::D2;
2795  case ARM::Q2:  return ARM::D4;
2796  case ARM::Q3:  return ARM::D6;
2797  case ARM::Q4:  return ARM::D8;
2798  case ARM::Q5:  return ARM::D10;
2799  case ARM::Q6:  return ARM::D12;
2800  case ARM::Q7:  return ARM::D14;
2801  case ARM::Q8:  return ARM::D16;
2802  case ARM::Q9:  return ARM::D18;
2803  case ARM::Q10: return ARM::D20;
2804  case ARM::Q11: return ARM::D22;
2805  case ARM::Q12: return ARM::D24;
2806  case ARM::Q13: return ARM::D26;
2807  case ARM::Q14: return ARM::D28;
2808  case ARM::Q15: return ARM::D30;
2809  }
2810}
2811
2812/// Parse a register list.
2813bool ARMAsmParser::
2814parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2815  assert(Parser.getTok().is(AsmToken::LCurly) &&
2816         "Token is not a Left Curly Brace");
2817  SMLoc S = Parser.getTok().getLoc();
2818  Parser.Lex(); // Eat '{' token.
2819  SMLoc RegLoc = Parser.getTok().getLoc();
2820
2821  // Check the first register in the list to see what register class
2822  // this is a list of.
2823  int Reg = tryParseRegister();
2824  if (Reg == -1)
2825    return Error(RegLoc, "register expected");
2826
2827  // The reglist instructions have at most 16 registers, so reserve
2828  // space for that many.
2829  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2830
2831  // Allow Q regs and just interpret them as the two D sub-registers.
2832  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2833    Reg = getDRegFromQReg(Reg);
2834    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2835    ++Reg;
2836  }
2837  const MCRegisterClass *RC;
2838  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2839    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2840  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2841    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2842  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2843    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2844  else
2845    return Error(RegLoc, "invalid register in register list");
2846
2847  // Store the register.
2848  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2849
2850  // This starts immediately after the first register token in the list,
2851  // so we can see either a comma or a minus (range separator) as a legal
2852  // next token.
2853  while (Parser.getTok().is(AsmToken::Comma) ||
2854         Parser.getTok().is(AsmToken::Minus)) {
2855    if (Parser.getTok().is(AsmToken::Minus)) {
2856      Parser.Lex(); // Eat the minus.
2857      SMLoc EndLoc = Parser.getTok().getLoc();
2858      int EndReg = tryParseRegister();
2859      if (EndReg == -1)
2860        return Error(EndLoc, "register expected");
2861      // Allow Q regs and just interpret them as the two D sub-registers.
2862      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2863        EndReg = getDRegFromQReg(EndReg) + 1;
2864      // If the register is the same as the start reg, there's nothing
2865      // more to do.
2866      if (Reg == EndReg)
2867        continue;
2868      // The register must be in the same register class as the first.
2869      if (!RC->contains(EndReg))
2870        return Error(EndLoc, "invalid register in register list");
2871      // Ranges must go from low to high.
2872      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2873        return Error(EndLoc, "bad range in register list");
2874
2875      // Add all the registers in the range to the register list.
2876      while (Reg != EndReg) {
2877        Reg = getNextRegister(Reg);
2878        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2879      }
2880      continue;
2881    }
2882    Parser.Lex(); // Eat the comma.
2883    RegLoc = Parser.getTok().getLoc();
2884    int OldReg = Reg;
2885    const AsmToken RegTok = Parser.getTok();
2886    Reg = tryParseRegister();
2887    if (Reg == -1)
2888      return Error(RegLoc, "register expected");
2889    // Allow Q regs and just interpret them as the two D sub-registers.
2890    bool isQReg = false;
2891    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2892      Reg = getDRegFromQReg(Reg);
2893      isQReg = true;
2894    }
2895    // The register must be in the same register class as the first.
2896    if (!RC->contains(Reg))
2897      return Error(RegLoc, "invalid register in register list");
2898    // List must be monotonically increasing.
2899    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg)) {
2900      if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2901        Warning(RegLoc, "register list not in ascending order");
2902      else
2903        return Error(RegLoc, "register list not in ascending order");
2904    }
2905    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2906      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2907              ") in register list");
2908      continue;
2909    }
2910    // VFP register lists must also be contiguous.
2911    // It's OK to use the enumeration values directly here, as the VFP
2912    // register classes have the enum sorted properly.
2913    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2914        Reg != OldReg + 1)
2915      return Error(RegLoc, "non-contiguous register range");
2916    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2917    if (isQReg)
2918      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2919  }
2920
2921  SMLoc E = Parser.getTok().getLoc();
2922  if (Parser.getTok().isNot(AsmToken::RCurly))
2923    return Error(E, "'}' expected");
2924  Parser.Lex(); // Eat '}' token.
2925
2926  // Push the register list operand.
2927  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2928
2929  // The ARM system instruction variants for LDM/STM have a '^' token here.
2930  if (Parser.getTok().is(AsmToken::Caret)) {
2931    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2932    Parser.Lex(); // Eat '^' token.
2933  }
2934
2935  return false;
2936}
2937
2938// Helper function to parse the lane index for vector lists.
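// For example, given "d3[2]" this parses the "[2]" as an IndexedLane with
// index 2, "d3[]" as AllLanes, and a bare "d3" (no bracket) as NoLanes.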
2939ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2940parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2941  Index = 0; // Always return a defined index value.
2942  if (Parser.getTok().is(AsmToken::LBrac)) {
2943    Parser.Lex(); // Eat the '['.
2944    if (Parser.getTok().is(AsmToken::RBrac)) {
2945      // "Dn[]" is the 'all lanes' syntax.
2946      LaneKind = AllLanes;
2947      Parser.Lex(); // Eat the ']'.
2948      return MatchOperand_Success;
2949    }
2950
2951    // There's an optional '#' token here. Normally there wouldn't be, but
2952    // inline assembly puts one in, and it's friendly to accept that.
2953    if (Parser.getTok().is(AsmToken::Hash))
2954      Parser.Lex(); // Eat the '#'
2955
2956    const MCExpr *LaneIndex;
2957    SMLoc Loc = Parser.getTok().getLoc();
2958    if (getParser().ParseExpression(LaneIndex)) {
2959      Error(Loc, "illegal expression");
2960      return MatchOperand_ParseFail;
2961    }
2962    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LaneIndex);
2963    if (!CE) {
2964      Error(Loc, "lane index must be empty or an integer");
2965      return MatchOperand_ParseFail;
2966    }
2967    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2968      Error(Parser.getTok().getLoc(), "']' expected");
2969      return MatchOperand_ParseFail;
2970    }
2971    Parser.Lex(); // Eat the ']'.
2972    int64_t Val = CE->getValue();
2973
2974    // FIXME: Make this range check context sensitive for .8, .16, .32.
2975    if (Val < 0 || Val > 7) {
2976      Error(Parser.getTok().getLoc(), "lane index out of range");
2977      return MatchOperand_ParseFail;
2978    }
2979    Index = Val;
2980    LaneKind = IndexedLane;
2981    return MatchOperand_Success;
2982  }
2983  LaneKind = NoLanes;
2984  return MatchOperand_Success;
2985}
2986
2987/// Parse a vector register list.
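/// Accepts forms such as "{d0, d1}", "{d0-d3}", "{d0[], d1[]}" and
/// "{d0[1], d2[1]}" (double-spaced), as well as a bare "d0" or "q0" as a
/// gas-compatible extension.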
2988ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2989parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2990  VectorLaneTy LaneKind;
2991  unsigned LaneIndex;
2992  SMLoc S = Parser.getTok().getLoc();
2993  // As an extension (to match gas), support a plain D register or Q register
2994// (without enclosing curly braces) as a single or double entry list,
2995  // respectively.
2996  if (Parser.getTok().is(AsmToken::Identifier)) {
2997    int Reg = tryParseRegister();
2998    if (Reg == -1)
2999      return MatchOperand_NoMatch;
3000    SMLoc E = Parser.getTok().getLoc();
3001    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
3002      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3003      if (Res != MatchOperand_Success)
3004        return Res;
3005      switch (LaneKind) {
3006      case NoLanes:
3007        E = Parser.getTok().getLoc();
3008        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
3009        break;
3010      case AllLanes:
3011        E = Parser.getTok().getLoc();
3012        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, false,
3013                                                                S, E));
3014        break;
3015      case IndexedLane:
3016        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
3017                                                               LaneIndex,
3018                                                               false, S, E));
3019        break;
3020      }
3021      return MatchOperand_Success;
3022    }
3023    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3024      Reg = getDRegFromQReg(Reg);
3025      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
3026      if (Res != MatchOperand_Success)
3027        return Res;
3028      switch (LaneKind) {
3029      case NoLanes:
3030        E = Parser.getTok().getLoc();
3031        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3032                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3033        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
3034        break;
3035      case AllLanes:
3036        E = Parser.getTok().getLoc();
3037        Reg = MRI->getMatchingSuperReg(Reg, ARM::dsub_0,
3038                                   &ARMMCRegisterClasses[ARM::DPairRegClassID]);
3039        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, false,
3040                                                                S, E));
3041        break;
3042      case IndexedLane:
3043        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
3044                                                               LaneIndex,
3045                                                               false, S, E));
3046        break;
3047      }
3048      return MatchOperand_Success;
3049    }
3050    Error(S, "vector register expected");
3051    return MatchOperand_ParseFail;
3052  }
3053
3054  if (Parser.getTok().isNot(AsmToken::LCurly))
3055    return MatchOperand_NoMatch;
3056
3057  Parser.Lex(); // Eat '{' token.
3058  SMLoc RegLoc = Parser.getTok().getLoc();
3059
3060  int Reg = tryParseRegister();
3061  if (Reg == -1) {
3062    Error(RegLoc, "register expected");
3063    return MatchOperand_ParseFail;
3064  }
3065  unsigned Count = 1;
3066  int Spacing = 0;
3067  unsigned FirstReg = Reg;
3068  // The list is of D registers, but we also allow Q regs and just interpret
3069  // them as the two D sub-registers.
3070  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3071    FirstReg = Reg = getDRegFromQReg(Reg);
3072    Spacing = 1; // double-spacing requires explicit D registers, otherwise
3073                 // it's ambiguous with a four-register single-spaced list.
3074    ++Reg;
3075    ++Count;
3076  }
3077  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
3078    return MatchOperand_ParseFail;
3079
3080  while (Parser.getTok().is(AsmToken::Comma) ||
3081         Parser.getTok().is(AsmToken::Minus)) {
3082    if (Parser.getTok().is(AsmToken::Minus)) {
3083      if (!Spacing)
3084        Spacing = 1; // Register range implies a single spaced list.
3085      else if (Spacing == 2) {
3086        Error(Parser.getTok().getLoc(),
3087              "sequential registers in double spaced list");
3088        return MatchOperand_ParseFail;
3089      }
3090      Parser.Lex(); // Eat the minus.
3091      SMLoc EndLoc = Parser.getTok().getLoc();
3092      int EndReg = tryParseRegister();
3093      if (EndReg == -1) {
3094        Error(EndLoc, "register expected");
3095        return MatchOperand_ParseFail;
3096      }
3097      // Allow Q regs and just interpret them as the two D sub-registers.
3098      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
3099        EndReg = getDRegFromQReg(EndReg) + 1;
3100      // If the register is the same as the start reg, there's nothing
3101      // more to do.
3102      if (Reg == EndReg)
3103        continue;
3104      // The register must be in the same register class as the first.
3105      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
3106        Error(EndLoc, "invalid register in register list");
3107        return MatchOperand_ParseFail;
3108      }
3109      // Ranges must go from low to high.
3110      if (Reg > EndReg) {
3111        Error(EndLoc, "bad range in register list");
3112        return MatchOperand_ParseFail;
3113      }
3114      // Parse the lane specifier if present.
3115      VectorLaneTy NextLaneKind;
3116      unsigned NextLaneIndex;
3117      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3118        return MatchOperand_ParseFail;
3119      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3120        Error(EndLoc, "mismatched lane index in register list");
3121        return MatchOperand_ParseFail;
3122      }
3123      EndLoc = Parser.getTok().getLoc();
3124
3125      // Add all the registers in the range to the register list.
3126      Count += EndReg - Reg;
3127      Reg = EndReg;
3128      continue;
3129    }
3130    Parser.Lex(); // Eat the comma.
3131    RegLoc = Parser.getTok().getLoc();
3132    int OldReg = Reg;
3133    Reg = tryParseRegister();
3134    if (Reg == -1) {
3135      Error(RegLoc, "register expected");
3136      return MatchOperand_ParseFail;
3137    }
3138    // Vector register lists must be contiguous.
3139    // It's OK to use the enumeration values directly here, as the VFP
3140    // register classes have the enum sorted properly.
3141    //
3142    // The list is of D registers, but we also allow Q regs and just interpret
3143    // them as the two D sub-registers.
3144    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
3145      if (!Spacing)
3146        Spacing = 1; // Register range implies a single spaced list.
3147      else if (Spacing == 2) {
3148        Error(RegLoc,
3149              "invalid register in double-spaced list (must be 'D' register')");
3150        return MatchOperand_ParseFail;
3151      }
3152      Reg = getDRegFromQReg(Reg);
3153      if (Reg != OldReg + 1) {
3154        Error(RegLoc, "non-contiguous register range");
3155        return MatchOperand_ParseFail;
3156      }
3157      ++Reg;
3158      Count += 2;
3159      // Parse the lane specifier if present.
3160      VectorLaneTy NextLaneKind;
3161      unsigned NextLaneIndex;
3162      SMLoc EndLoc = Parser.getTok().getLoc();
3163      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3164        return MatchOperand_ParseFail;
3165      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3166        Error(EndLoc, "mismatched lane index in register list");
3167        return MatchOperand_ParseFail;
3168      }
3169      continue;
3170    }
3171    // Normal D register.
3172    // Figure out the register spacing (single or double) of the list if
3173    // we don't know it already.
3174    if (!Spacing)
3175      Spacing = 1 + (Reg == OldReg + 2);
3176
3177    // Just check that it's contiguous and keep going.
3178    if (Reg != OldReg + Spacing) {
3179      Error(RegLoc, "non-contiguous register range");
3180      return MatchOperand_ParseFail;
3181    }
3182    ++Count;
3183    // Parse the lane specifier if present.
3184    VectorLaneTy NextLaneKind;
3185    unsigned NextLaneIndex;
3186    SMLoc EndLoc = Parser.getTok().getLoc();
3187    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3188      return MatchOperand_ParseFail;
3189    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3190      Error(EndLoc, "mismatched lane index in register list");
3191      return MatchOperand_ParseFail;
3192    }
3193  }
3194
3195  SMLoc E = Parser.getTok().getLoc();
3196  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3197    Error(E, "'}' expected");
3198    return MatchOperand_ParseFail;
3199  }
3200  Parser.Lex(); // Eat '}' token.
3201
3202  switch (LaneKind) {
3203  case NoLanes:
3204    // Two-register operands are converted to the
3205    // composite register classes.
3206    if (Count == 2) {
3207      const MCRegisterClass *RC = (Spacing == 1) ?
3208        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3209        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3210      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3211    }
3212
3213    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3214                                                    (Spacing == 2), S, E));
3215    break;
3216  case AllLanes:
3217    // Two-register operands are converted to the
3218    // composite register classes.
3219    if (Count == 2) {
3220      const MCRegisterClass *RC = (Spacing == 1) ?
3221        &ARMMCRegisterClasses[ARM::DPairRegClassID] :
3222        &ARMMCRegisterClasses[ARM::DPairSpcRegClassID];
3223      FirstReg = MRI->getMatchingSuperReg(FirstReg, ARM::dsub_0, RC);
3224    }
3225    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3226                                                            (Spacing == 2),
3227                                                            S, E));
3228    break;
3229  case IndexedLane:
3230    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3231                                                           LaneIndex,
3232                                                           (Spacing == 2),
3233                                                           S, E));
3234    break;
3235  }
3236  return MatchOperand_Success;
3237}
3238
3239/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
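/// For example, the "ish" in "dmb ish" or the "sy" in "dsb sy".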
3240ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3241parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3242  SMLoc S = Parser.getTok().getLoc();
3243  const AsmToken &Tok = Parser.getTok();
3244  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3245  StringRef OptStr = Tok.getString();
3246
3247  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3248    .Case("sy",    ARM_MB::SY)
3249    .Case("st",    ARM_MB::ST)
3250    .Case("sh",    ARM_MB::ISH)
3251    .Case("ish",   ARM_MB::ISH)
3252    .Case("shst",  ARM_MB::ISHST)
3253    .Case("ishst", ARM_MB::ISHST)
3254    .Case("nsh",   ARM_MB::NSH)
3255    .Case("un",    ARM_MB::NSH)
3256    .Case("nshst", ARM_MB::NSHST)
3257    .Case("unst",  ARM_MB::NSHST)
3258    .Case("osh",   ARM_MB::OSH)
3259    .Case("oshst", ARM_MB::OSHST)
3260    .Default(~0U);
3261
3262  if (Opt == ~0U)
3263    return MatchOperand_NoMatch;
3264
3265  Parser.Lex(); // Eat identifier token.
3266  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3267  return MatchOperand_Success;
3268}
3269
3270/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
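/// For example, the "if" in "cpsie if"; any combination of 'a', 'i' and 'f'
/// is accepted, as is the literal "none".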
3271ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3272parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3273  SMLoc S = Parser.getTok().getLoc();
3274  const AsmToken &Tok = Parser.getTok();
3275  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3276  StringRef IFlagsStr = Tok.getString();
3277
3278  // An iflags string of "none" is interpreted to mean that none of the AIF
3279  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3280  unsigned IFlags = 0;
3281  if (IFlagsStr != "none") {
3282    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3283      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3284        .Case("a", ARM_PROC::A)
3285        .Case("i", ARM_PROC::I)
3286        .Case("f", ARM_PROC::F)
3287        .Default(~0U);
3288
3289      // If some specific iflag is already set, it means that some letter is
3290      // present more than once; this is not acceptable.
3291      if (Flag == ~0U || (IFlags & Flag))
3292        return MatchOperand_NoMatch;
3293
3294      IFlags |= Flag;
3295    }
3296  }
3297
3298  Parser.Lex(); // Eat identifier token.
3299  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3300  return MatchOperand_Success;
3301}
3302
3303/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
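/// For example, the "apsr_nzcvq" in "msr apsr_nzcvq, r0", the "cpsr_fc" in
/// "msr cpsr_fc, r1", or an M-class register name such as "primask".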
3304ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3305parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3306  SMLoc S = Parser.getTok().getLoc();
3307  const AsmToken &Tok = Parser.getTok();
3308  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3309  StringRef Mask = Tok.getString();
3310
3311  if (isMClass()) {
3312    // See ARMv6-M 10.1.1
3313    std::string Name = Mask.lower();
3314    unsigned FlagsVal = StringSwitch<unsigned>(Name)
3315      .Case("apsr", 0)
3316      .Case("iapsr", 1)
3317      .Case("eapsr", 2)
3318      .Case("xpsr", 3)
3319      .Case("ipsr", 5)
3320      .Case("epsr", 6)
3321      .Case("iepsr", 7)
3322      .Case("msp", 8)
3323      .Case("psp", 9)
3324      .Case("primask", 16)
3325      .Case("basepri", 17)
3326      .Case("basepri_max", 18)
3327      .Case("faultmask", 19)
3328      .Case("control", 20)
3329      .Default(~0U);
3330
3331    if (FlagsVal == ~0U)
3332      return MatchOperand_NoMatch;
3333
3334    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3335      // basepri, basepri_max and faultmask only valid for V7m.
3336      return MatchOperand_NoMatch;
3337
3338    Parser.Lex(); // Eat identifier token.
3339    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3340    return MatchOperand_Success;
3341  }
3342
3343  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3344  size_t Start = 0, Next = Mask.find('_');
3345  StringRef Flags = "";
3346  std::string SpecReg = Mask.slice(Start, Next).lower();
3347  if (Next != StringRef::npos)
3348    Flags = Mask.slice(Next+1, Mask.size());
3349
3350  // FlagsVal contains the complete mask:
3351  // 3-0: Mask
3352  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3353  unsigned FlagsVal = 0;
3354
3355  if (SpecReg == "apsr") {
3356    FlagsVal = StringSwitch<unsigned>(Flags)
3357    .Case("nzcvq",  0x8) // same as CPSR_f
3358    .Case("g",      0x4) // same as CPSR_s
3359    .Case("nzcvqg", 0xc) // same as CPSR_fs
3360    .Default(~0U);
3361
3362    if (FlagsVal == ~0U) {
3363      if (!Flags.empty())
3364        return MatchOperand_NoMatch;
3365      else
3366        FlagsVal = 8; // No flag
3367    }
3368  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3369    // cpsr_all is an alias for cpsr_fc, as is plain cpsr.
3370    if (Flags == "all" || Flags == "")
3371      Flags = "fc";
3372    for (int i = 0, e = Flags.size(); i != e; ++i) {
3373      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3374      .Case("c", 1)
3375      .Case("x", 2)
3376      .Case("s", 4)
3377      .Case("f", 8)
3378      .Default(~0U);
3379
3380      // If some specific flag is already set, it means that some letter is
3381      // present more than once; this is not acceptable.
3382      if (Flag == ~0U || (FlagsVal & Flag))
3383        return MatchOperand_NoMatch;
3384      FlagsVal |= Flag;
3385    }
3386  } else // No match for special register.
3387    return MatchOperand_NoMatch;
3388
3389  // Special register without flags is NOT equivalent to "fc" flags.
3390  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3391  // two lines would enable gas compatibility at the expense of breaking
3392  // round-tripping.
3393  //
3394  // if (!FlagsVal)
3395  //  FlagsVal = 0x9;
3396
3397  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3398  if (SpecReg == "spsr")
3399    FlagsVal |= 16;
3400
3401  Parser.Lex(); // Eat identifier token.
3402  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3403  return MatchOperand_Success;
3404}
3405
3406ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3407parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3408            int Low, int High) {
3409  const AsmToken &Tok = Parser.getTok();
3410  if (Tok.isNot(AsmToken::Identifier)) {
3411    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3412    return MatchOperand_ParseFail;
3413  }
3414  StringRef ShiftName = Tok.getString();
3415  std::string LowerOp = Op.lower();
3416  std::string UpperOp = Op.upper();
3417  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3418    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3419    return MatchOperand_ParseFail;
3420  }
3421  Parser.Lex(); // Eat shift type token.
3422
3423  // There must be a '#' and a shift amount.
3424  if (Parser.getTok().isNot(AsmToken::Hash) &&
3425      Parser.getTok().isNot(AsmToken::Dollar)) {
3426    Error(Parser.getTok().getLoc(), "'#' expected");
3427    return MatchOperand_ParseFail;
3428  }
3429  Parser.Lex(); // Eat hash token.
3430
3431  const MCExpr *ShiftAmount;
3432  SMLoc Loc = Parser.getTok().getLoc();
3433  if (getParser().ParseExpression(ShiftAmount)) {
3434    Error(Loc, "illegal expression");
3435    return MatchOperand_ParseFail;
3436  }
3437  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3438  if (!CE) {
3439    Error(Loc, "constant expression expected");
3440    return MatchOperand_ParseFail;
3441  }
3442  int Val = CE->getValue();
3443  if (Val < Low || Val > High) {
3444    Error(Loc, "immediate value out of range");
3445    return MatchOperand_ParseFail;
3446  }
3447
3448  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3449
3450  return MatchOperand_Success;
3451}
3452
3453ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3454parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3455  const AsmToken &Tok = Parser.getTok();
3456  SMLoc S = Tok.getLoc();
3457  if (Tok.isNot(AsmToken::Identifier)) {
3458    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3459    return MatchOperand_ParseFail;
3460  }
3461  int Val = StringSwitch<int>(Tok.getString())
3462    .Case("be", 1)
3463    .Case("le", 0)
3464    .Default(-1);
3465  Parser.Lex(); // Eat the token.
3466
3467  if (Val == -1) {
3468    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3469    return MatchOperand_ParseFail;
3470  }
3471  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3472                                                                  getContext()),
3473                                           S, Parser.getTok().getLoc()));
3474  return MatchOperand_Success;
3475}
3476
3477/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3478/// instructions. Legal values are:
3479///     lsl #n  'n' in [0,31]
3480///     asr #n  'n' in [1,32]
3481///             n == 32 encoded as n == 0.
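/// For example, the "lsl #4" in "ssat r0, #8, r1, lsl #4" or the "asr #16"
/// in "usat r0, #7, r1, asr #16".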
3482ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3483parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3484  const AsmToken &Tok = Parser.getTok();
3485  SMLoc S = Tok.getLoc();
3486  if (Tok.isNot(AsmToken::Identifier)) {
3487    Error(S, "shift operator 'asr' or 'lsl' expected");
3488    return MatchOperand_ParseFail;
3489  }
3490  StringRef ShiftName = Tok.getString();
3491  bool isASR;
3492  if (ShiftName == "lsl" || ShiftName == "LSL")
3493    isASR = false;
3494  else if (ShiftName == "asr" || ShiftName == "ASR")
3495    isASR = true;
3496  else {
3497    Error(S, "shift operator 'asr' or 'lsl' expected");
3498    return MatchOperand_ParseFail;
3499  }
3500  Parser.Lex(); // Eat the operator.
3501
3502  // A '#' and a shift amount.
3503  if (Parser.getTok().isNot(AsmToken::Hash) &&
3504      Parser.getTok().isNot(AsmToken::Dollar)) {
3505    Error(Parser.getTok().getLoc(), "'#' expected");
3506    return MatchOperand_ParseFail;
3507  }
3508  Parser.Lex(); // Eat hash token.
3509
3510  const MCExpr *ShiftAmount;
3511  SMLoc E = Parser.getTok().getLoc();
3512  if (getParser().ParseExpression(ShiftAmount)) {
3513    Error(E, "malformed shift expression");
3514    return MatchOperand_ParseFail;
3515  }
3516  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3517  if (!CE) {
3518    Error(E, "shift amount must be an immediate");
3519    return MatchOperand_ParseFail;
3520  }
3521
3522  int64_t Val = CE->getValue();
3523  if (isASR) {
3524    // Shift amount must be in [1,32]
3525    if (Val < 1 || Val > 32) {
3526      Error(E, "'asr' shift amount must be in range [1,32]");
3527      return MatchOperand_ParseFail;
3528    }
3529    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3530    if (isThumb() && Val == 32) {
3531      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3532      return MatchOperand_ParseFail;
3533    }
3534    if (Val == 32) Val = 0;
3535  } else {
3536    // Shift amount must be in [0,31]
3537    if (Val < 0 || Val > 31) {
3538      Error(E, "'lsl' shift amount must be in range [0,31]");
3539      return MatchOperand_ParseFail;
3540    }
3541  }
3542
3543  E = Parser.getTok().getLoc();
3544  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3545
3546  return MatchOperand_Success;
3547}
3548
3549/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3550/// of instructions. Legal values are:
3551///     ror #n  'n' in {0, 8, 16, 24}
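/// For example, the "ror #8" in "sxtb r0, r1, ror #8".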
3552ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3553parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3554  const AsmToken &Tok = Parser.getTok();
3555  SMLoc S = Tok.getLoc();
3556  if (Tok.isNot(AsmToken::Identifier))
3557    return MatchOperand_NoMatch;
3558  StringRef ShiftName = Tok.getString();
3559  if (ShiftName != "ror" && ShiftName != "ROR")
3560    return MatchOperand_NoMatch;
3561  Parser.Lex(); // Eat the operator.
3562
3563  // A '#' and a rotate amount.
3564  if (Parser.getTok().isNot(AsmToken::Hash) &&
3565      Parser.getTok().isNot(AsmToken::Dollar)) {
3566    Error(Parser.getTok().getLoc(), "'#' expected");
3567    return MatchOperand_ParseFail;
3568  }
3569  Parser.Lex(); // Eat hash token.
3570
3571  const MCExpr *ShiftAmount;
3572  SMLoc E = Parser.getTok().getLoc();
3573  if (getParser().ParseExpression(ShiftAmount)) {
3574    Error(E, "malformed rotate expression");
3575    return MatchOperand_ParseFail;
3576  }
3577  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3578  if (!CE) {
3579    Error(E, "rotate amount must be an immediate");
3580    return MatchOperand_ParseFail;
3581  }
3582
3583  int64_t Val = CE->getValue();
3584  // Rotate amount must be in {0, 8, 16, 24}; 0 is an undocumented extension.
3585  // Normally, zero is represented in asm by omitting the rotate operand
3586  // entirely.
3587  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3588    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3589    return MatchOperand_ParseFail;
3590  }
3591
3592  E = Parser.getTok().getLoc();
3593  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3594
3595  return MatchOperand_Success;
3596}
3597
3598ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3599parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3600  SMLoc S = Parser.getTok().getLoc();
3601  // The bitfield descriptor is really two operands, the LSB and the width.
3602  if (Parser.getTok().isNot(AsmToken::Hash) &&
3603      Parser.getTok().isNot(AsmToken::Dollar)) {
3604    Error(Parser.getTok().getLoc(), "'#' expected");
3605    return MatchOperand_ParseFail;
3606  }
3607  Parser.Lex(); // Eat hash token.
3608
3609  const MCExpr *LSBExpr;
3610  SMLoc E = Parser.getTok().getLoc();
3611  if (getParser().ParseExpression(LSBExpr)) {
3612    Error(E, "malformed immediate expression");
3613    return MatchOperand_ParseFail;
3614  }
3615  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3616  if (!CE) {
3617    Error(E, "'lsb' operand must be an immediate");
3618    return MatchOperand_ParseFail;
3619  }
3620
3621  int64_t LSB = CE->getValue();
3622  // The LSB must be in the range [0,31]
3623  if (LSB < 0 || LSB > 31) {
3624    Error(E, "'lsb' operand must be in the range [0,31]");
3625    return MatchOperand_ParseFail;
3626  }
3627  E = Parser.getTok().getLoc();
3628
3629  // Expect another immediate operand.
3630  if (Parser.getTok().isNot(AsmToken::Comma)) {
3631    Error(Parser.getTok().getLoc(), "too few operands");
3632    return MatchOperand_ParseFail;
3633  }
3634  Parser.Lex(); // Eat comma token.
3635  if (Parser.getTok().isNot(AsmToken::Hash) &&
3636      Parser.getTok().isNot(AsmToken::Dollar)) {
3637    Error(Parser.getTok().getLoc(), "'#' expected");
3638    return MatchOperand_ParseFail;
3639  }
3640  Parser.Lex(); // Eat hash token.
3641
3642  const MCExpr *WidthExpr;
3643  if (getParser().ParseExpression(WidthExpr)) {
3644    Error(E, "malformed immediate expression");
3645    return MatchOperand_ParseFail;
3646  }
3647  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3648  if (!CE) {
3649    Error(E, "'width' operand must be an immediate");
3650    return MatchOperand_ParseFail;
3651  }
3652
3653  int64_t Width = CE->getValue();
3654  // The width must be in the range [1,32-lsb]
3655  if (Width < 1 || Width > 32 - LSB) {
3656    Error(E, "'width' operand must be in the range [1,32-lsb]");
3657    return MatchOperand_ParseFail;
3658  }
3659  E = Parser.getTok().getLoc();
3660
3661  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3662
3663  return MatchOperand_Success;
3664}
3665
3666ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3667parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3668  // Check for a post-index addressing register operand. Specifically:
3669  // postidx_reg := '+' register {, shift}
3670  //              | '-' register {, shift}
3671  //              | register {, shift}
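  // For example, the "r2" in "ldrt r0, [r1], r2", or the "-r2, lsl #2" in
  // "ldr r0, [r1], -r2, lsl #2".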
3672
3673  // This method must return MatchOperand_NoMatch without consuming any tokens
3674  // in the case where there is no match, as other alternatives take other
3675  // parse methods.
3676  AsmToken Tok = Parser.getTok();
3677  SMLoc S = Tok.getLoc();
3678  bool haveEaten = false;
3679  bool isAdd = true;
3680  int Reg = -1;
3681  if (Tok.is(AsmToken::Plus)) {
3682    Parser.Lex(); // Eat the '+' token.
3683    haveEaten = true;
3684  } else if (Tok.is(AsmToken::Minus)) {
3685    Parser.Lex(); // Eat the '-' token.
3686    isAdd = false;
3687    haveEaten = true;
3688  }
3689  if (Parser.getTok().is(AsmToken::Identifier))
3690    Reg = tryParseRegister();
3691  if (Reg == -1) {
3692    if (!haveEaten)
3693      return MatchOperand_NoMatch;
3694    Error(Parser.getTok().getLoc(), "register expected");
3695    return MatchOperand_ParseFail;
3696  }
3697  SMLoc E = Parser.getTok().getLoc();
3698
3699  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3700  unsigned ShiftImm = 0;
3701  if (Parser.getTok().is(AsmToken::Comma)) {
3702    Parser.Lex(); // Eat the ','.
3703    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3704      return MatchOperand_ParseFail;
3705  }
3706
3707  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3708                                                  ShiftImm, S, E));
3709
3710  return MatchOperand_Success;
3711}
3712
3713ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3714parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3715  // Check for a post-index addressing register operand. Specifically:
3716  // am3offset := '+' register
3717  //              | '-' register
3718  //              | register
3719  //              | # imm
3720  //              | # + imm
3721  //              | # - imm
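  // For example, the "#4" in "ldrd r0, r1, [r2], #4", or the "-r3" in
  // "ldrh r0, [r1], -r3".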
3722
3723  // This method must return MatchOperand_NoMatch without consuming any tokens
3724  // in the case where there is no match, as other alternatives take other
3725  // parse methods.
3726  AsmToken Tok = Parser.getTok();
3727  SMLoc S = Tok.getLoc();
3728
3729  // Do immediates first, as we always parse those if we have a '#'.
3730  if (Parser.getTok().is(AsmToken::Hash) ||
3731      Parser.getTok().is(AsmToken::Dollar)) {
3732    Parser.Lex(); // Eat the '#'.
3733    // Explicitly look for a '-', as we need to encode negative zero
3734    // differently.
3735    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3736    const MCExpr *Offset;
3737    if (getParser().ParseExpression(Offset))
3738      return MatchOperand_ParseFail;
3739    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3740    if (!CE) {
3741      Error(S, "constant expression expected");
3742      return MatchOperand_ParseFail;
3743    }
3744    SMLoc E = Tok.getLoc();
3745    // Negative zero is encoded as the flag value INT32_MIN.
3746    int32_t Val = CE->getValue();
3747    if (isNegative && Val == 0)
3748      Val = INT32_MIN;
3749
3750    Operands.push_back(
3751      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3752
3753    return MatchOperand_Success;
3754  }
3755
3756
3757  bool haveEaten = false;
3758  bool isAdd = true;
3759  int Reg = -1;
3760  if (Tok.is(AsmToken::Plus)) {
3761    Parser.Lex(); // Eat the '+' token.
3762    haveEaten = true;
3763  } else if (Tok.is(AsmToken::Minus)) {
3764    Parser.Lex(); // Eat the '-' token.
3765    isAdd = false;
3766    haveEaten = true;
3767  }
3768  if (Parser.getTok().is(AsmToken::Identifier))
3769    Reg = tryParseRegister();
3770  if (Reg == -1) {
3771    if (!haveEaten)
3772      return MatchOperand_NoMatch;
3773    Error(Parser.getTok().getLoc(), "register expected");
3774    return MatchOperand_ParseFail;
3775  }
3776  SMLoc E = Parser.getTok().getLoc();
3777
3778  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3779                                                  0, S, E));
3780
3781  return MatchOperand_Success;
3782}
3783
3784/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3785/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3786/// when they refer to multiple MIOperands inside a single one.
3787bool ARMAsmParser::
3788cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3789             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3790  // Rt, Rt2
3791  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3792  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3793  // Create a writeback register dummy placeholder.
3794  Inst.addOperand(MCOperand::CreateReg(0));
3795  // addr
3796  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3797  // pred
3798  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3799  return true;
3800}
3801
3802/// cvtT2StrdPre - Convert parsed operands to MCInst.
3803/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3804/// when they refer to multiple MIOperands inside a single one.
3805bool ARMAsmParser::
3806cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3807             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3808  // Create a writeback register dummy placeholder.
3809  Inst.addOperand(MCOperand::CreateReg(0));
3810  // Rt, Rt2
3811  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3812  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3813  // addr
3814  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3815  // pred
3816  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3817  return true;
3818}
3819
3820/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3821/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3822/// when they refer to multiple MIOperands inside a single one.
3823bool ARMAsmParser::
3824cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3825                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3826  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3827
3828  // Create a writeback register dummy placeholder.
3829  Inst.addOperand(MCOperand::CreateImm(0));
3830
3831  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3832  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3833  return true;
3834}
3835
3836/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3837/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3838/// when they refer to multiple MIOperands inside a single one.
3839bool ARMAsmParser::
3840cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3841                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3842  // Create a writeback register dummy placeholder.
3843  Inst.addOperand(MCOperand::CreateImm(0));
3844  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3845  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3846  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3847  return true;
3848}
3849
3850/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3851/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3852/// when they refer to multiple MIOperands inside a single one.
3853bool ARMAsmParser::
3854cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3855                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3856  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3857
3858  // Create a writeback register dummy placeholder.
3859  Inst.addOperand(MCOperand::CreateImm(0));
3860
3861  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3862  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3863  return true;
3864}
3865
3866/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3867/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3868/// when they refer to multiple MIOperands inside a single one.
3869bool ARMAsmParser::
3870cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3871                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3872  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3873
3874  // Create a writeback register dummy placeholder.
3875  Inst.addOperand(MCOperand::CreateImm(0));
3876
3877  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3878  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3879  return true;
3880}
3881
3882
3883/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3884/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3885/// when they refer to multiple MIOperands inside a single one.
3886bool ARMAsmParser::
3887cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3888                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3889  // Create a writeback register dummy placeholder.
3890  Inst.addOperand(MCOperand::CreateImm(0));
3891  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3892  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3893  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3894  return true;
3895}
3896
3897/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3898/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3899/// when they refer to multiple MIOperands inside a single one.
3900bool ARMAsmParser::
3901cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3902                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3903  // Create a writeback register dummy placeholder.
3904  Inst.addOperand(MCOperand::CreateImm(0));
3905  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3906  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3907  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3908  return true;
3909}
3910
3911/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3912/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3913/// when they refer to multiple MIOperands inside a single one.
3914bool ARMAsmParser::
3915cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3916                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3917  // Create a writeback register dummy placeholder.
3918  Inst.addOperand(MCOperand::CreateImm(0));
3919  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3920  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3921  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3922  return true;
3923}
3924
3925/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3926/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3927/// when they refer to multiple MIOperands inside a single one.
3928bool ARMAsmParser::
3929cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3930                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3931  // Rt
3932  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3933  // Create a writeback register dummy placeholder.
3934  Inst.addOperand(MCOperand::CreateImm(0));
3935  // addr
3936  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3937  // offset
3938  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3939  // pred
3940  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3941  return true;
3942}
3943
3944/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3945/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3946/// when they refer to multiple MIOperands inside a single one.
3947bool ARMAsmParser::
3948cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3949                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3950  // Rt
3951  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3952  // Create a writeback register dummy placeholder.
3953  Inst.addOperand(MCOperand::CreateImm(0));
3954  // addr
3955  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3956  // offset
3957  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3958  // pred
3959  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3960  return true;
3961}
3962
3963/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3964/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3965/// when they refer to multiple MIOperands inside a single one.
3966bool ARMAsmParser::
3967cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3968                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3969  // Create a writeback register dummy placeholder.
3970  Inst.addOperand(MCOperand::CreateImm(0));
3971  // Rt
3972  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3973  // addr
3974  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3975  // offset
3976  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3977  // pred
3978  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3979  return true;
3980}
3981
3982/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3983/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3984/// when they refer to multiple MIOperands inside a single one.
3985bool ARMAsmParser::
3986cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3987                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3988  // Create a writeback register dummy placeholder.
3989  Inst.addOperand(MCOperand::CreateImm(0));
3990  // Rt
3991  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3992  // addr
3993  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3994  // offset
3995  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3996  // pred
3997  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3998  return true;
3999}
4000
4001/// cvtLdrdPre - Convert parsed operands to MCInst.
4002/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4003/// when they refer to multiple MIOperands inside a single one.
4004bool ARMAsmParser::
4005cvtLdrdPre(MCInst &Inst, unsigned Opcode,
4006           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4007  // Rt, Rt2
4008  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4009  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4010  // Create a writeback register dummy placeholder.
4011  Inst.addOperand(MCOperand::CreateImm(0));
4012  // addr
4013  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4014  // pred
4015  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4016  return true;
4017}
4018
4019/// cvtStrdPre - Convert parsed operands to MCInst.
4020/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4021/// when they refer to multiple MIOperands inside a single one.
4022bool ARMAsmParser::
4023cvtStrdPre(MCInst &Inst, unsigned Opcode,
4024           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4025  // Create a writeback register dummy placeholder.
4026  Inst.addOperand(MCOperand::CreateImm(0));
4027  // Rt, Rt2
4028  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4029  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4030  // addr
4031  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
4032  // pred
4033  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4034  return true;
4035}
4036
4037/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
4038/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4039/// when they refer to multiple MIOperands inside a single one.
4040bool ARMAsmParser::
4041cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
4042                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4043  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
4044  // Create a writeback register dummy placeholder.
4045  Inst.addOperand(MCOperand::CreateImm(0));
4046  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
4047  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4048  return true;
4049}
4050
4051/// cvtThumbMultiply - Convert parsed operands to MCInst.
4052/// Needed here because the Asm Gen Matcher can't properly handle tied operands
4053/// when they refer to multiple MIOperands inside a single one.
4054bool ARMAsmParser::
4055cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
4056           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4057  // The second source operand must be the same register as the destination
4058  // operand.
4059  if (Operands.size() == 6 &&
4060      (((ARMOperand*)Operands[3])->getReg() !=
4061       ((ARMOperand*)Operands[5])->getReg()) &&
4062      (((ARMOperand*)Operands[3])->getReg() !=
4063       ((ARMOperand*)Operands[4])->getReg())) {
4064    Error(Operands[3]->getStartLoc(),
4065          "destination register must match source register");
4066    return false;
4067  }
4068  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
4069  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
4070  // If we have a three-operand form, make sure to set Rn to be the operand
4071  // that isn't the same as Rd.
4072  unsigned RegOp = 4;
4073  if (Operands.size() == 6 &&
4074      ((ARMOperand*)Operands[4])->getReg() ==
4075        ((ARMOperand*)Operands[3])->getReg())
4076    RegOp = 5;
4077  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
4078  Inst.addOperand(Inst.getOperand(0));
4079  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
4080
4081  return true;
4082}
4083
4084bool ARMAsmParser::
4085cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
4086              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4087  // Vd
4088  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4089  // Create a writeback register dummy placeholder.
4090  Inst.addOperand(MCOperand::CreateImm(0));
4091  // Vn
4092  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4093  // pred
4094  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4095  return true;
4096}
4097
4098bool ARMAsmParser::
4099cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
4100                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4101  // Vd
4102  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4103  // Create a writeback register dummy placeholder.
4104  Inst.addOperand(MCOperand::CreateImm(0));
4105  // Vn
4106  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4107  // Vm
4108  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4109  // pred
4110  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4111  return true;
4112}
4113
4114bool ARMAsmParser::
4115cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
4116              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4117  // Create a writeback register dummy placeholder.
4118  Inst.addOperand(MCOperand::CreateImm(0));
4119  // Vn
4120  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4121  // Vt
4122  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4123  // pred
4124  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4125  return true;
4126}
4127
4128bool ARMAsmParser::
4129cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
4130                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4131  // Create a writeback register dummy placeholder.
4132  Inst.addOperand(MCOperand::CreateImm(0));
4133  // Vn
4134  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
4135  // Vm
4136  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
4137  // Vt
4138  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
4139  // pred
4140  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
4141  return true;
4142}
4143
4144/// Parse an ARM memory expression. Return false if successful, true on
4145/// error.  The first token must be a '[' when called.
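/// Accepts forms such as "[r0]", "[r0, #4]", "[r0, r1, lsl #2]" and
/// "[r0, :128]" (alignment specifier), each optionally followed by a
/// pre-indexing '!' marker.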
4146bool ARMAsmParser::
4147parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4148  SMLoc S, E;
4149  assert(Parser.getTok().is(AsmToken::LBrac) &&
4150         "Token is not a Left Bracket");
4151  S = Parser.getTok().getLoc();
4152  Parser.Lex(); // Eat left bracket token.
4153
4154  const AsmToken &BaseRegTok = Parser.getTok();
4155  int BaseRegNum = tryParseRegister();
4156  if (BaseRegNum == -1)
4157    return Error(BaseRegTok.getLoc(), "register expected");
4158
4159  // The next token must either be a comma or a closing bracket.
4160  const AsmToken &Tok = Parser.getTok();
4161  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
4162    return Error(Tok.getLoc(), "malformed memory operand");
4163
4164  if (Tok.is(AsmToken::RBrac)) {
4165    E = Tok.getLoc();
4166    Parser.Lex(); // Eat right bracket token.
4167
4168    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
4169                                             0, 0, false, S, E));
4170
4171    // If there's a pre-indexing writeback marker, '!', just add it as a token
4172    // operand. It's rather odd, but syntactically valid.
4173    if (Parser.getTok().is(AsmToken::Exclaim)) {
4174      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4175      Parser.Lex(); // Eat the '!'.
4176    }
4177
4178    return false;
4179  }
4180
4181  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
4182  Parser.Lex(); // Eat the comma.
4183
4184  // If we have a ':', it's an alignment specifier.
4185  if (Parser.getTok().is(AsmToken::Colon)) {
4186    Parser.Lex(); // Eat the ':'.
4187    E = Parser.getTok().getLoc();
4188
4189    const MCExpr *Expr;
4190    if (getParser().ParseExpression(Expr))
4191     return true;
4192
4193    // The expression has to be a constant. Memory references with relocations
4194    // don't come through here, as they use the <label> forms of the relevant
4195    // instructions.
4196    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4197    if (!CE)
4198      return Error (E, "constant expression expected");
4199
4200    unsigned Align = 0;
4201    switch (CE->getValue()) {
4202    default:
4203      return Error(E,
4204                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4205    case 16:  Align = 2; break;
4206    case 32:  Align = 4; break;
4207    case 64:  Align = 8; break;
4208    case 128: Align = 16; break;
4209    case 256: Align = 32; break;
4210    }
4211
4212    // Now we should have the closing ']'
4213    E = Parser.getTok().getLoc();
4214    if (Parser.getTok().isNot(AsmToken::RBrac))
4215      return Error(E, "']' expected");
4216    Parser.Lex(); // Eat right bracket token.
4217
4218    // Don't worry about range checking the value here. That's handled by
4219    // the is*() predicates.
4220    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4221                                             ARM_AM::no_shift, 0, Align,
4222                                             false, S, E));
4223
4224    // If there's a pre-indexing writeback marker, '!', just add it as a token
4225    // operand.
4226    if (Parser.getTok().is(AsmToken::Exclaim)) {
4227      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4228      Parser.Lex(); // Eat the '!'.
4229    }
4230
4231    return false;
4232  }
4233
4234  // If we have a '#', it's an immediate offset, else assume it's a register
4235  // offset. Be friendly and also accept a plain integer (without a leading
4236  // hash) for gas compatibility.
4237  if (Parser.getTok().is(AsmToken::Hash) ||
4238      Parser.getTok().is(AsmToken::Dollar) ||
4239      Parser.getTok().is(AsmToken::Integer)) {
4240    if (Parser.getTok().isNot(AsmToken::Integer))
4241      Parser.Lex(); // Eat the '#'.
4242    E = Parser.getTok().getLoc();
4243
4244    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4245    const MCExpr *Offset;
4246    if (getParser().ParseExpression(Offset))
4247     return true;
4248
4249    // The expression has to be a constant. Memory references with relocations
4250    // don't come through here, as they use the <label> forms of the relevant
4251    // instructions.
4252    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4253    if (!CE)
4254      return Error (E, "constant expression expected");
4255
4256    // If the constant was #-0, represent it as INT32_MIN.
4257    int32_t Val = CE->getValue();
4258    if (isNegative && Val == 0)
4259      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4260
4261    // Now we should have the closing ']'
4262    E = Parser.getTok().getLoc();
4263    if (Parser.getTok().isNot(AsmToken::RBrac))
4264      return Error(E, "']' expected");
4265    Parser.Lex(); // Eat right bracket token.
4266
4267    // Don't worry about range checking the value here. That's handled by
4268    // the is*() predicates.
4269    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4270                                             ARM_AM::no_shift, 0, 0,
4271                                             false, S, E));
4272
4273    // If there's a pre-indexing writeback marker, '!', just add it as a token
4274    // operand.
4275    if (Parser.getTok().is(AsmToken::Exclaim)) {
4276      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4277      Parser.Lex(); // Eat the '!'.
4278    }
4279
4280    return false;
4281  }
4282
4283  // The register offset is optionally preceded by a '+' or '-'
4284  bool isNegative = false;
4285  if (Parser.getTok().is(AsmToken::Minus)) {
4286    isNegative = true;
4287    Parser.Lex(); // Eat the '-'.
4288  } else if (Parser.getTok().is(AsmToken::Plus)) {
4289    // Nothing to do.
4290    Parser.Lex(); // Eat the '+'.
4291  }
4292
4293  E = Parser.getTok().getLoc();
4294  int OffsetRegNum = tryParseRegister();
4295  if (OffsetRegNum == -1)
4296    return Error(E, "register expected");
4297
4298  // If there's a shift operator, handle it.
4299  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4300  unsigned ShiftImm = 0;
4301  if (Parser.getTok().is(AsmToken::Comma)) {
4302    Parser.Lex(); // Eat the ','.
4303    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4304      return true;
4305  }
4306
4307  // Now we should have the closing ']'
4308  E = Parser.getTok().getLoc();
4309  if (Parser.getTok().isNot(AsmToken::RBrac))
4310    return Error(E, "']' expected");
4311  Parser.Lex(); // Eat right bracket token.
4312
4313  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4314                                           ShiftType, ShiftImm, 0, isNegative,
4315                                           S, E));
4316
4317  // If there's a pre-indexing writeback marker, '!', just add it as a token
4318  // operand.
4319  if (Parser.getTok().is(AsmToken::Exclaim)) {
4320    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4321    Parser.Lex(); // Eat the '!'.
4322  }
4323
4324  return false;
4325}
4326
4327/// parseMemRegOffsetShift - one of these two:
4328///   ( lsl | lsr | asr | ror ) , # shift_amount
4329///   rrx
4330/// Returns true on error (no valid shift was parsed), false on success.
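/// For example, for a register offset written "r1, lsl #2" this routine is
/// entered at "lsl" and, on success, leaves St == ARM_AM::lsl and
/// Amount == 2; an "rrx" shift stands alone and leaves Amount == 0.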
4331bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4332                                          unsigned &Amount) {
4333  SMLoc Loc = Parser.getTok().getLoc();
4334  const AsmToken &Tok = Parser.getTok();
4335  if (Tok.isNot(AsmToken::Identifier))
4336    return true;
4337  StringRef ShiftName = Tok.getString();
4338  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4339      ShiftName == "asl" || ShiftName == "ASL")
4340    St = ARM_AM::lsl;
4341  else if (ShiftName == "lsr" || ShiftName == "LSR")
4342    St = ARM_AM::lsr;
4343  else if (ShiftName == "asr" || ShiftName == "ASR")
4344    St = ARM_AM::asr;
4345  else if (ShiftName == "ror" || ShiftName == "ROR")
4346    St = ARM_AM::ror;
4347  else if (ShiftName == "rrx" || ShiftName == "RRX")
4348    St = ARM_AM::rrx;
4349  else
4350    return Error(Loc, "illegal shift operator");
4351  Parser.Lex(); // Eat shift type token.
4352
4353  // rrx stands alone.
4354  Amount = 0;
4355  if (St != ARM_AM::rrx) {
4356    Loc = Parser.getTok().getLoc();
4357    // A '#' and a shift amount.
4358    const AsmToken &HashTok = Parser.getTok();
4359    if (HashTok.isNot(AsmToken::Hash) &&
4360        HashTok.isNot(AsmToken::Dollar))
4361      return Error(HashTok.getLoc(), "'#' expected");
4362    Parser.Lex(); // Eat hash token.
4363
4364    const MCExpr *Expr;
4365    if (getParser().ParseExpression(Expr))
4366      return true;
4367    // Range check the immediate.
4368    // lsl, ror: 0 <= imm <= 31
4369    // lsr, asr: 0 <= imm <= 32
4370    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4371    if (!CE)
4372      return Error(Loc, "shift amount must be an immediate");
4373    int64_t Imm = CE->getValue();
4374    if (Imm < 0 ||
4375        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4376        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4377      return Error(Loc, "immediate shift value out of range");
4378    Amount = Imm;
4379  }
4380
4381  return false;
4382}
4383
4384/// parseFPImm - A floating point immediate expression operand.
4385ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4386parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4387  // Anything that can accept a floating point constant as an operand
4388  // needs to go through here, as the regular ParseExpression is
4389  // integer only.
4390  //
4391  // This routine still creates a generic Immediate operand, containing
4392  // a bitcast of the 64-bit floating point value. The various operands
4393  // that accept floats can check whether the value is valid for them
4394  // via the standard is*() predicates.
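  //
  // For example, for "vmov.f32 s0, #1.0" the 1.0 literal is bitcast to its
  // IEEE single-precision pattern (0x3f800000) and pushed as an ordinary
  // immediate operand for the is*() predicates to validate.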
4395
4396  SMLoc S = Parser.getTok().getLoc();
4397
4398  if (Parser.getTok().isNot(AsmToken::Hash) &&
4399      Parser.getTok().isNot(AsmToken::Dollar))
4400    return MatchOperand_NoMatch;
4401
4402  // Disambiguate the VMOV forms that can accept an FP immediate.
4403  // vmov.f32 <sreg>, #imm
4404  // vmov.f64 <dreg>, #imm
4405  // vmov.f32 <dreg>, #imm  @ vector f32x2
4406  // vmov.f32 <qreg>, #imm  @ vector f32x4
4407  //
4408  // There are also the NEON VMOV instructions which expect an
4409  // integer constant. Make sure we don't try to parse an FPImm
4410  // for these:
4411  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4412  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4413  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4414                           TyOp->getToken() != ".f64"))
4415    return MatchOperand_NoMatch;
4416
4417  Parser.Lex(); // Eat the '#'.
4418
4419  // Handle negation, as that still comes through as a separate token.
4420  bool isNegative = false;
4421  if (Parser.getTok().is(AsmToken::Minus)) {
4422    isNegative = true;
4423    Parser.Lex();
4424  }
4425  const AsmToken &Tok = Parser.getTok();
4426  SMLoc Loc = Tok.getLoc();
4427  if (Tok.is(AsmToken::Real)) {
4428    APFloat RealVal(APFloat::IEEEsingle, Tok.getString());
4429    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4430    // If we had a '-' in front, toggle the sign bit.
4431    IntVal ^= (uint64_t)isNegative << 31;
4432    Parser.Lex(); // Eat the token.
4433    Operands.push_back(ARMOperand::CreateImm(
4434          MCConstantExpr::Create(IntVal, getContext()),
4435          S, Parser.getTok().getLoc()));
4436    return MatchOperand_Success;
4437  }
4438  // Also handle plain integers. Instructions which allow floating point
4439  // immediates also allow a raw encoded 8-bit value.
4440  if (Tok.is(AsmToken::Integer)) {
4441    int64_t Val = Tok.getIntVal();
4442    Parser.Lex(); // Eat the token.
4443    if (Val > 255 || Val < 0) {
4444      Error(Loc, "encoded floating point value out of range");
4445      return MatchOperand_ParseFail;
4446    }
4447    double RealVal = ARM_AM::getFPImmFloat(Val);
4448    Val = APFloat(APFloat::IEEEdouble, RealVal).bitcastToAPInt().getZExtValue();
4449    Operands.push_back(ARMOperand::CreateImm(
4450        MCConstantExpr::Create(Val, getContext()), S,
4451        Parser.getTok().getLoc()));
4452    return MatchOperand_Success;
4453  }
4454
4455  Error(Loc, "invalid floating point immediate");
4456  return MatchOperand_ParseFail;
4457}
4458
4459/// Parse an ARM instruction operand.  For now this parses the operand regardless
4460/// of the mnemonic.
4461bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4462                                StringRef Mnemonic) {
4463  SMLoc S, E;
4464
4465  // Check if the current operand has a custom associated parser, if so, try to
4466  // custom parse the operand, or fallback to the general approach.
4467  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4468  if (ResTy == MatchOperand_Success)
4469    return false;
4470  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4471  // there was a match, but an error occurred, in which case, just return that
4472  // the operand parsing failed.
4473  if (ResTy == MatchOperand_ParseFail)
4474    return true;
4475
4476  switch (getLexer().getKind()) {
4477  default:
4478    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4479    return true;
4480  case AsmToken::Identifier: {
4481    if (!tryParseRegisterWithWriteBack(Operands))
4482      return false;
4483    int Res = tryParseShiftRegister(Operands);
4484    if (Res == 0) // success
4485      return false;
4486    else if (Res == -1) // irrecoverable error
4487      return true;
4488    // If this is VMRS, check for the apsr_nzcv operand.
4489    if (Mnemonic == "vmrs" &&
4490        Parser.getTok().getString().equals_lower("apsr_nzcv")) {
4491      S = Parser.getTok().getLoc();
4492      Parser.Lex();
4493      Operands.push_back(ARMOperand::CreateToken("APSR_nzcv", S));
4494      return false;
4495    }
4496
4497    // Fall through for the Identifier case that is not a register or a
4498    // special name.
4499  }
4500  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4501  case AsmToken::Integer: // things like 1f and 2b as branch targets
4502  case AsmToken::String:  // quoted label names.
4503  case AsmToken::Dot: {   // . as a branch target
4504    // This was not a register so parse other operands that start with an
4505    // identifier (like labels) as expressions and create them as immediates.
4506    const MCExpr *IdVal;
4507    S = Parser.getTok().getLoc();
4508    if (getParser().ParseExpression(IdVal))
4509      return true;
4510    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4511    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4512    return false;
4513  }
4514  case AsmToken::LBrac:
4515    return parseMemory(Operands);
4516  case AsmToken::LCurly:
4517    return parseRegisterList(Operands);
4518  case AsmToken::Dollar:
4519  case AsmToken::Hash: {
4520    // #42 -> immediate.
4521    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4522    S = Parser.getTok().getLoc();
4523    Parser.Lex();
4524    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4525    const MCExpr *ImmVal;
4526    if (getParser().ParseExpression(ImmVal))
4527      return true;
4528    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4529    if (CE) {
4530      int32_t Val = CE->getValue();
4531      if (isNegative && Val == 0)
4532        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4533    }
4534    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4535    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4536    return false;
4537  }
4538  case AsmToken::Colon: {
4539    // ":lower16:" and ":upper16:" expression prefixes
4540    // FIXME: Check it's an expression prefix,
4541    // e.g. (FOO - :lower16:BAR) isn't legal.
4542    ARMMCExpr::VariantKind RefKind;
4543    if (parsePrefix(RefKind))
4544      return true;
4545
4546    const MCExpr *SubExprVal;
4547    if (getParser().ParseExpression(SubExprVal))
4548      return true;
4549
4550    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4551                                                   getContext());
4552    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4553    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4554    return false;
4555  }
4556  }
4557}
4558
4559// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4560//  :lower16: and :upper16:.
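//  For example, for the operand ":lower16:_foo" this sets RefKind to
//  ARMMCExpr::VK_ARM_LO16 and leaves the lexer positioned at "_foo" for the
//  following expression parse.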
4561bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4562  RefKind = ARMMCExpr::VK_ARM_None;
4563
4564  // :lower16: and :upper16: modifiers
4565  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4566  Parser.Lex(); // Eat ':'
4567
4568  if (getLexer().isNot(AsmToken::Identifier)) {
4569    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4570    return true;
4571  }
4572
4573  StringRef IDVal = Parser.getTok().getIdentifier();
4574  if (IDVal == "lower16") {
4575    RefKind = ARMMCExpr::VK_ARM_LO16;
4576  } else if (IDVal == "upper16") {
4577    RefKind = ARMMCExpr::VK_ARM_HI16;
4578  } else {
4579    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4580    return true;
4581  }
4582  Parser.Lex();
4583
4584  if (getLexer().isNot(AsmToken::Colon)) {
4585    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4586    return true;
4587  }
4588  Parser.Lex(); // Eat the last ':'
4589  return false;
4590}
4591
4592/// \brief Given a mnemonic, split out possible predication code and carry
4593/// setting letters to form a canonical mnemonic and flags.
4594//
4595// FIXME: Would be nice to autogen this.
4596// FIXME: This is a bit of a maze of special cases.
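// For example, "addseq" splits into Mnemonic == "add", PredicationCode ==
// ARMCC::EQ and CarrySetting == true, while "cpsie" splits into "cps" with
// ProcessorIMod == ARM_PROC::IE.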
4597StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4598                                      unsigned &PredicationCode,
4599                                      bool &CarrySetting,
4600                                      unsigned &ProcessorIMod,
4601                                      StringRef &ITMask) {
4602  PredicationCode = ARMCC::AL;
4603  CarrySetting = false;
4604  ProcessorIMod = 0;
4605
4606  // Ignore some mnemonics we know aren't predicated forms.
4607  //
4608  // FIXME: Would be nice to autogen this.
4609  if ((Mnemonic == "movs" && isThumb()) ||
4610      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4611      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4612      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4613      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4614      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4615      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4616      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4617      Mnemonic == "fmuls")
4618    return Mnemonic;
4619
4620  // First, split out any predication code. Ignore mnemonics we know aren't
4621  // predicated but do have a carry-set and so weren't caught above.
4622  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4623      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4624      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4625      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4626    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4627      .Case("eq", ARMCC::EQ)
4628      .Case("ne", ARMCC::NE)
4629      .Case("hs", ARMCC::HS)
4630      .Case("cs", ARMCC::HS)
4631      .Case("lo", ARMCC::LO)
4632      .Case("cc", ARMCC::LO)
4633      .Case("mi", ARMCC::MI)
4634      .Case("pl", ARMCC::PL)
4635      .Case("vs", ARMCC::VS)
4636      .Case("vc", ARMCC::VC)
4637      .Case("hi", ARMCC::HI)
4638      .Case("ls", ARMCC::LS)
4639      .Case("ge", ARMCC::GE)
4640      .Case("lt", ARMCC::LT)
4641      .Case("gt", ARMCC::GT)
4642      .Case("le", ARMCC::LE)
4643      .Case("al", ARMCC::AL)
4644      .Default(~0U);
4645    if (CC != ~0U) {
4646      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4647      PredicationCode = CC;
4648    }
4649  }
4650
4651  // Next, determine if we have a carry setting bit. We explicitly ignore the
4652  // instructions whose mnemonics we know inherently end in 's'.
4653  if (Mnemonic.endswith("s") &&
4654      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4655        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4656        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4657        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4658        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4659        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4660        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4661        Mnemonic == "fmuls" || Mnemonic == "fcmps" || Mnemonic == "fcmpzs" ||
4662        (Mnemonic == "movs" && isThumb()))) {
4663    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4664    CarrySetting = true;
4665  }
4666
4667  // The "cps" instruction can have an interrupt mode operand which is glued
4668  // into the mnemonic. If so, split it out and parse the imod operand.
4669  if (Mnemonic.startswith("cps")) {
4670    // Split out any imod code.
4671    unsigned IMod =
4672      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4673      .Case("ie", ARM_PROC::IE)
4674      .Case("id", ARM_PROC::ID)
4675      .Default(~0U);
4676    if (IMod != ~0U) {
4677      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4678      ProcessorIMod = IMod;
4679    }
4680  }
4681
4682  // The "it" instruction has the condition mask on the end of the mnemonic.
4683  if (Mnemonic.startswith("it")) {
4684    ITMask = Mnemonic.slice(2, Mnemonic.size());
4685    Mnemonic = Mnemonic.slice(0, 2);
4686  }
4687
4688  return Mnemonic;
4689}
4690
4691/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4692/// inclusion of carry set or predication code operands.
4693//
4694// FIXME: It would be nice to autogen this.
4695void ARMAsmParser::
4696getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4697                      bool &CanAcceptPredicationCode) {
4698  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4699      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4700      Mnemonic == "add" || Mnemonic == "adc" ||
4701      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4702      Mnemonic == "orr" || Mnemonic == "mvn" ||
4703      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4704      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4705      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4706                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4707                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4708    CanAcceptCarrySet = true;
4709  } else
4710    CanAcceptCarrySet = false;
4711
4712  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4713      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4714      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4715      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4716      Mnemonic == "dsb" || Mnemonic == "isb" ||
4717      (Mnemonic == "clrex" && !isThumb()) ||
4718      (Mnemonic == "nop" && isThumbOne()) ||
4719      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4720        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4721        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4722      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4723       !isThumb()) ||
4724      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4725    CanAcceptPredicationCode = false;
4726  } else
4727    CanAcceptPredicationCode = true;
4728
4729  if (isThumb()) {
4730    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4731        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4732      CanAcceptPredicationCode = false;
4733  }
4734}
4735
4736bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4737                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4738  // FIXME: This is all horribly hacky. We really need a better way to deal
4739  // with optional operands like this in the matcher table.
4740
4741  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4742  // another does not. Specifically, the MOVW instruction does not. So we
4743  // special case it here and remove the defaulted (non-setting) cc_out
4744  // operand if that's the instruction we're trying to match.
4745  //
4746  // We do this as post-processing of the explicit operands rather than just
4747  // conditionally adding the cc_out in the first place because we need
4748  // to check the type of the parsed immediate operand.
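  //
  // For example, in ARM mode "mov r0, #0xabcd" parses with a default (zero)
  // cc_out; the immediate is not a valid modified-immediate but does fit in
  // 16 bits, so the cc_out operand is dropped here to let the MOVW form match.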
4749  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4750      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4751      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4752      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4753    return true;
4754
4755  // Register-register 'add' for thumb does not have a cc_out operand
4756  // when there are only two register operands.
4757  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4758      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4759      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4760      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4761    return true;
4762  // Register-register 'add' for thumb does not have a cc_out operand
4763  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4764  // have to check the immediate range here since Thumb2 has a variant
4765  // that can handle a different range and has a cc_out operand.
4766  if (((isThumb() && Mnemonic == "add") ||
4767       (isThumbTwo() && Mnemonic == "sub")) &&
4768      Operands.size() == 6 &&
4769      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4770      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4771      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4772      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4773      ((Mnemonic == "add" && static_cast<ARMOperand*>(Operands[5])->isReg()) ||
4774       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4775    return true;
4776  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4777  // imm0_4095 variant. That's the least-preferred variant when
4778  // selecting via the generic "add" mnemonic, so to know that we
4779  // should remove the cc_out operand, we have to explicitly check that
4780  // it's not one of the other variants. Ugh.
4781  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4782      Operands.size() == 6 &&
4783      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4784      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4785      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4786    // Nest conditions rather than one big 'if' statement for readability.
4787    //
4788    // If either register is a high reg, it's either one of the SP
4789    // variants (handled above) or a 32-bit encoding, so we just
4790    // check against T3. If the second register is the PC, this is an
4791    // alternate form of ADR, which uses encoding T4, so check for that too.
4792    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4793         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4794        static_cast<ARMOperand*>(Operands[4])->getReg() != ARM::PC &&
4795        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4796      return false;
4797    // If both registers are low, we're in an IT block, and the immediate is
4798    // in range, we should use encoding T1 instead, which has a cc_out.
4799    if (inITBlock() &&
4800        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4801        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4802        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4803      return false;
4804
4805    // Otherwise, we use encoding T4, which does not have a cc_out
4806    // operand.
4807    return true;
4808  }
4809
4810  // The thumb2 multiply instruction doesn't have a CCOut register, so
4811  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4812  // use the 16-bit encoding or not.
4813  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4814      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4815      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4816      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4817      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4818      // If the registers aren't low regs, the destination reg isn't the
4819      // same as one of the source regs, or the cc_out operand is zero
4820      // outside of an IT block, we have to use the 32-bit encoding, so
4821      // remove the cc_out operand.
4822      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4823       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4824       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4825       !inITBlock() ||
4826       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4827        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4828        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4829        static_cast<ARMOperand*>(Operands[4])->getReg())))
4830    return true;
4831
4832  // Also check the 'mul' syntax variant that doesn't specify an explicit
4833  // destination register.
4834  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4835      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4836      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4837      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4838      // If the registers aren't low regs  or the cc_out operand is zero
4839      // outside of an IT block, we have to use the 32-bit encoding, so
4840      // remove the cc_out operand.
4841      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4842       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4843       !inITBlock()))
4844    return true;
4845
4846
4847
4848  // Register-register 'add/sub' for thumb does not have a cc_out operand
4849  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4850  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4851  // right, this will result in better diagnostics (which operand is off)
4852  // anyway.
4853  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4854      (Operands.size() == 5 || Operands.size() == 6) &&
4855      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4856      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4857      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4858      (static_cast<ARMOperand*>(Operands[4])->isImm() ||
4859       (Operands.size() == 6 &&
4860        static_cast<ARMOperand*>(Operands[5])->isImm())))
4861    return true;
4862
4863  return false;
4864}
4865
4866static bool isDataTypeToken(StringRef Tok) {
4867  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4868    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4869    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4870    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4871    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4872    Tok == ".f" || Tok == ".d";
4873}
4874
4875// FIXME: This bit should probably be handled via an explicit match class
4876// in the .td files that matches the suffix instead of having it be
4877// a literal string token the way it is now.
4878static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4879  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4880}
4881
4882static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4883/// Parse an ARM instruction mnemonic followed by its operands.
4884bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4885                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4886  // Apply mnemonic aliases before doing anything else, as the destination
4887  // mnemonic may include suffixes and we want to handle them normally.
4888  // The generic tblgen'erated code does this later, at the start of
4889  // MatchInstructionImpl(), but that's too late for aliases that include
4890  // any sort of suffix.
4891  unsigned AvailableFeatures = getAvailableFeatures();
4892  applyMnemonicAliases(Name, AvailableFeatures);
4893
4894  // First check for the ARM-specific .req directive.
4895  if (Parser.getTok().is(AsmToken::Identifier) &&
4896      Parser.getTok().getIdentifier() == ".req") {
4897    parseDirectiveReq(Name, NameLoc);
4898    // We always return 'error' for this, as we're done with this
4899    // statement and don't need to match the instruction.
4900    return true;
4901  }
4902
4903  // Create the leading tokens for the mnemonic, split by '.' characters.
4904  size_t Start = 0, Next = Name.find('.');
4905  StringRef Mnemonic = Name.slice(Start, Next);
4906
4907  // Split out the predication code and carry setting flag from the mnemonic.
4908  unsigned PredicationCode;
4909  unsigned ProcessorIMod;
4910  bool CarrySetting;
4911  StringRef ITMask;
4912  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4913                           ProcessorIMod, ITMask);
4914
4915  // In Thumb1, only the branch (B) instruction can be predicated.
4916  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4917    Parser.EatToEndOfStatement();
4918    return Error(NameLoc, "conditional execution not supported in Thumb1");
4919  }
4920
4921  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4922
4923  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4924  // is the mask as it will be for the IT encoding if the conditional
4925  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4926  // where the conditional bit0 is zero, the instruction post-processing
4927  // will adjust the mask accordingly.
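  // For example, "itte ne" arrives here with ITMask == "te" and yields
  // Mask == 0b1010, which is already the final IT mask since NE has a '1'
  // as its bit0.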
4928  if (Mnemonic == "it") {
4929    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4930    if (ITMask.size() > 3) {
4931      Parser.EatToEndOfStatement();
4932      return Error(Loc, "too many conditions on IT instruction");
4933    }
4934    unsigned Mask = 8;
4935    for (unsigned i = ITMask.size(); i != 0; --i) {
4936      char pos = ITMask[i - 1];
4937      if (pos != 't' && pos != 'e') {
4938        Parser.EatToEndOfStatement();
4939        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4940      }
4941      Mask >>= 1;
4942      if (ITMask[i - 1] == 't')
4943        Mask |= 8;
4944    }
4945    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4946  }
4947
4948  // FIXME: This is all a pretty gross hack. We should automatically handle
4949  // optional operands like this via tblgen.
4950
4951  // Next, add the CCOut and ConditionCode operands, if needed.
4952  //
4953  // For mnemonics which can ever incorporate a carry setting bit or predication
4954  // code, our matching model involves us always generating CCOut and
4955  // ConditionCode operands to match the mnemonic "as written" and then we let
4956  // the matcher deal with finding the right instruction or generating an
4957  // appropriate error.
4958  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4959  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4960
4961  // If we had a carry-set on an instruction that can't do that, issue an
4962  // error.
4963  if (!CanAcceptCarrySet && CarrySetting) {
4964    Parser.EatToEndOfStatement();
4965    return Error(NameLoc, "instruction '" + Mnemonic +
4966                 "' can not set flags, but 's' suffix specified");
4967  }
4968  // If we had a predication code on an instruction that can't do that, issue an
4969  // error.
4970  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4971    Parser.EatToEndOfStatement();
4972    return Error(NameLoc, "instruction '" + Mnemonic +
4973                 "' is not predicable, but condition code specified");
4974  }
4975
4976  // Add the carry setting operand, if necessary.
4977  if (CanAcceptCarrySet) {
4978    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4979    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4980                                               Loc));
4981  }
4982
4983  // Add the predication code operand, if necessary.
4984  if (CanAcceptPredicationCode) {
4985    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4986                                      CarrySetting);
4987    Operands.push_back(ARMOperand::CreateCondCode(
4988                         ARMCC::CondCodes(PredicationCode), Loc));
4989  }
4990
4991  // Add the processor imod operand, if necessary.
4992  if (ProcessorIMod) {
4993    Operands.push_back(ARMOperand::CreateImm(
4994          MCConstantExpr::Create(ProcessorIMod, getContext()),
4995                                 NameLoc, NameLoc));
4996  }
4997
4998  // Add the remaining tokens in the mnemonic.
4999  while (Next != StringRef::npos) {
5000    Start = Next;
5001    Next = Name.find('.', Start + 1);
5002    StringRef ExtraToken = Name.slice(Start, Next);
5003
5004    // Some NEON instructions have an optional datatype suffix that is
5005    // completely ignored. Check for that.
5006    if (isDataTypeToken(ExtraToken) &&
5007        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
5008      continue;
5009
5010    if (ExtraToken != ".n") {
5011      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
5012      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
5013    }
5014  }
5015
5016  // Read the remaining operands.
5017  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5018    // Read the first operand.
5019    if (parseOperand(Operands, Mnemonic)) {
5020      Parser.EatToEndOfStatement();
5021      return true;
5022    }
5023
5024    while (getLexer().is(AsmToken::Comma)) {
5025      Parser.Lex();  // Eat the comma.
5026
5027      // Parse and remember the operand.
5028      if (parseOperand(Operands, Mnemonic)) {
5029        Parser.EatToEndOfStatement();
5030        return true;
5031      }
5032    }
5033  }
5034
5035  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5036    SMLoc Loc = getLexer().getLoc();
5037    Parser.EatToEndOfStatement();
5038    return Error(Loc, "unexpected token in argument list");
5039  }
5040
5041  Parser.Lex(); // Consume the EndOfStatement
5042
5043  // Some instructions, mostly Thumb, have forms for the same mnemonic that
5044  // do and don't have a cc_out optional-def operand. With some spot-checks
5045  // of the operand list, we can figure out which variant we're trying to
5046  // parse and adjust accordingly before actually matching. We shouldn't ever
5047  // try to remove a cc_out operand that was explicitly set on the
5048  // mnemonic, of course (CarrySetting == true). Reason number #317 the
5049  // table driven matcher doesn't fit well with the ARM instruction set.
5050  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
5051    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5052    Operands.erase(Operands.begin() + 1);
5053    delete Op;
5054  }
5055
5056  // ARM mode 'blx' needs special handling, as the register operand version
5057  // is predicable, but the label operand version is not. So, we can't rely
5058  // on the Mnemonic based checking to correctly figure out when to put
5059  // a k_CondCode operand in the list. If we're trying to match the label
5060  // version, remove the k_CondCode operand here.
5061  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
5062      static_cast<ARMOperand*>(Operands[2])->isImm()) {
5063    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
5064    Operands.erase(Operands.begin() + 1);
5065    delete Op;
5066  }
5067
5068  // The vector-compare-to-zero instructions have a literal token "#0" at
5069  // the end that comes to here as an immediate operand. Convert it to a
5070  // token to play nicely with the matcher.
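  // For example, "vceq.i32 d0, d1, #0" initially parses its trailing #0 as
  // an immediate; it is re-created as the literal token "#0" so the
  // compare-against-zero form matches.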
5071  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
5072      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
5073      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5074    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5075    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5076    if (CE && CE->getValue() == 0) {
5077      Operands.erase(Operands.begin() + 5);
5078      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5079      delete Op;
5080    }
5081  }
5082  // VCMP{E} does the same thing, but with a different operand count.
5083  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
5084      static_cast<ARMOperand*>(Operands[4])->isImm()) {
5085    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
5086    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5087    if (CE && CE->getValue() == 0) {
5088      Operands.erase(Operands.begin() + 4);
5089      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5090      delete Op;
5091    }
5092  }
5093  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
5094  // end. Convert it to a token here. Take care not to convert those
5095  // that should hit the Thumb2 encoding.
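  // For example, Thumb1 "rsbs r0, r1, #0" (negate) has its trailing #0
  // converted to a token so it matches the 16-bit encoding rather than a
  // Thumb2 immediate form.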
5096  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
5097      static_cast<ARMOperand*>(Operands[3])->isReg() &&
5098      static_cast<ARMOperand*>(Operands[4])->isReg() &&
5099      static_cast<ARMOperand*>(Operands[5])->isImm()) {
5100    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
5101    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
5102    if (CE && CE->getValue() == 0 &&
5103        (isThumbOne() ||
5104         // The cc_out operand matches the IT block.
5105         ((inITBlock() != CarrySetting) &&
5106         // Neither register operand is a high register.
5107         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
5108          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
5109      Operands.erase(Operands.begin() + 5);
5110      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
5111      delete Op;
5112    }
5113  }
5114
5115  return false;
5116}
5117
5118// Validate context-sensitive operand constraints.
5119
5120// Return 'true' if the register list contains registers other than low GPRs
5121// (and HiReg, if specified), 'false' otherwise. If Reg is in the register
5122// list, set 'containsReg' to true.
5123static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
5124                                 unsigned HiReg, bool &containsReg) {
5125  containsReg = false;
5126  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5127    unsigned OpReg = Inst.getOperand(i).getReg();
5128    if (OpReg == Reg)
5129      containsReg = true;
5130    // Anything other than a low register isn't legal here.
5131    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
5132      return true;
5133  }
5134  return false;
5135}
5136
5137// Check if the specified register is in the register list of the inst,
5138// starting at the indicated operand number.
5139static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
5140  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
5141    unsigned OpReg = Inst.getOperand(i).getReg();
5142    if (OpReg == Reg)
5143      return true;
5144  }
5145  return false;
5146}
5147
5148// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
5149// the ARMInsts array) instead. Getting that here requires awkward
5150// API changes, though. Better way?
5151namespace llvm {
5152extern const MCInstrDesc ARMInsts[];
5153}
5154static const MCInstrDesc &getInstDesc(unsigned Opcode) {
5155  return ARMInsts[Opcode];
5156}
5157
5158// FIXME: We would really like to be able to tablegen'erate this.
5159bool ARMAsmParser::
5160validateInstruction(MCInst &Inst,
5161                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5162  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
5163  SMLoc Loc = Operands[0]->getStartLoc();
5164  // Check the IT block state first.
5165  // NOTE: BKPT instruction has the interesting property of being
5166  // allowed in IT blocks, but not being predicable.  It just always
5167  // executes.
5168  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT &&
5169      Inst.getOpcode() != ARM::BKPT) {
5170    unsigned bit = 1;
5171    if (ITState.FirstCond)
5172      ITState.FirstCond = false;
5173    else
5174      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
5175    // The instruction must be predicable.
5176    if (!MCID.isPredicable())
5177      return Error(Loc, "instructions in IT block must be predicable");
5178    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
5179    unsigned ITCond = bit ? ITState.Cond :
5180      ARMCC::getOppositeCondition(ITState.Cond);
5181    if (Cond != ITCond) {
5182      // Find the condition code Operand to get its SMLoc information.
5183      SMLoc CondLoc;
5184      for (unsigned i = 1; i < Operands.size(); ++i)
5185        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
5186          CondLoc = Operands[i]->getStartLoc();
5187      return Error(CondLoc, "incorrect condition in IT block; got '" +
5188                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
5189                   "', but expected '" +
5190                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
5191    }
5192  // Check for non-'al' condition codes outside of the IT block.
5193  } else if (isThumbTwo() && MCID.isPredicable() &&
5194             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
5195             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
5196             Inst.getOpcode() != ARM::t2B)
5197    return Error(Loc, "predicated instructions must be in IT block");
5198
5199  switch (Inst.getOpcode()) {
5200  case ARM::LDRD:
5201  case ARM::LDRD_PRE:
5202  case ARM::LDRD_POST:
5203  case ARM::LDREXD: {
5204    // Rt2 must be Rt + 1.
5205    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5206    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5207    if (Rt2 != Rt + 1)
5208      return Error(Operands[3]->getStartLoc(),
5209                   "destination operands must be sequential");
5210    return false;
5211  }
5212  case ARM::STRD: {
5213    // Rt2 must be Rt + 1.
5214    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5215    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5216    if (Rt2 != Rt + 1)
5217      return Error(Operands[3]->getStartLoc(),
5218                   "source operands must be sequential");
5219    return false;
5220  }
5221  case ARM::STRD_PRE:
5222  case ARM::STRD_POST:
5223  case ARM::STREXD: {
5224    // Rt2 must be Rt + 1.
5225    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5226    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5227    if (Rt2 != Rt + 1)
5228      return Error(Operands[3]->getStartLoc(),
5229                   "source operands must be sequential");
5230    return false;
5231  }
5232  case ARM::SBFX:
5233  case ARM::UBFX: {
5234    // width must be in range [1, 32-lsb]
5235    unsigned lsb = Inst.getOperand(2).getImm();
5236    unsigned widthm1 = Inst.getOperand(3).getImm();
5237    if (widthm1 >= 32 - lsb)
5238      return Error(Operands[5]->getStartLoc(),
5239                   "bitfield width must be in range [1,32-lsb]");
5240    return false;
5241  }
5242  case ARM::tLDMIA: {
5243    // If we're parsing Thumb2, the .w variant is available and handles
5244    // most cases that are normally illegal for a Thumb1 LDM
5245    // instruction. We'll make the transformation in processInstruction()
5246    // if necessary.
5247    //
5248    // Thumb LDM instructions are writeback iff the base register is not
5249    // in the register list.
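    // For example, in Thumb1 "ldm r0!, {r1, r2}" (base not in the list)
    // requires the '!', while "ldm r0, {r0, r1}" (base in the list) must
    // not have one.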
5250    unsigned Rn = Inst.getOperand(0).getReg();
5251    bool hasWritebackToken =
5252      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5253       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5254    bool listContainsBase;
5255    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5256      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5257                   "registers must be in range r0-r7");
5258    // If we should have writeback, then there should be a '!' token.
5259    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5260      return Error(Operands[2]->getStartLoc(),
5261                   "writeback operator '!' expected");
5262    // If we should not have writeback, there must not be a '!'. This is
5263    // true even for the 32-bit wide encodings.
5264    if (listContainsBase && hasWritebackToken)
5265      return Error(Operands[3]->getStartLoc(),
5266                   "writeback operator '!' not allowed when base register "
5267                   "in register list");
5268
5269    break;
5270  }
5271  case ARM::t2LDMIA_UPD: {
5272    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5273      return Error(Operands[4]->getStartLoc(),
5274                   "writeback operator '!' not allowed when base register "
5275                   "in register list");
5276    break;
5277  }
5278  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5279  // so only issue a diagnostic for Thumb1. The instructions will be
5280  // switched to the t2 encodings in processInstruction() if necessary.
5281  case ARM::tPOP: {
5282    bool listContainsBase;
5283    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5284        !isThumbTwo())
5285      return Error(Operands[2]->getStartLoc(),
5286                   "registers must be in range r0-r7 or pc");
5287    break;
5288  }
5289  case ARM::tPUSH: {
5290    bool listContainsBase;
5291    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5292        !isThumbTwo())
5293      return Error(Operands[2]->getStartLoc(),
5294                   "registers must be in range r0-r7 or lr");
5295    break;
5296  }
5297  case ARM::tSTMIA_UPD: {
5298    bool listContainsBase;
5299    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5300      return Error(Operands[4]->getStartLoc(),
5301                   "registers must be in range r0-r7");
5302    break;
5303  }
5304  }
5305
5306  return false;
5307}
5308
5309static unsigned getRealVSTOpcode(unsigned Opc, unsigned &Spacing) {
5310  switch(Opc) {
5311  default: llvm_unreachable("unexpected opcode!");
5312  // VST1LN
5313  case ARM::VST1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5314  case ARM::VST1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5315  case ARM::VST1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5316  case ARM::VST1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST1LNd8_UPD;
5317  case ARM::VST1LNdWB_register_Asm_16: Spacing = 1; return ARM::VST1LNd16_UPD;
5318  case ARM::VST1LNdWB_register_Asm_32: Spacing = 1; return ARM::VST1LNd32_UPD;
5319  case ARM::VST1LNdAsm_8:  Spacing = 1; return ARM::VST1LNd8;
5320  case ARM::VST1LNdAsm_16: Spacing = 1; return ARM::VST1LNd16;
5321  case ARM::VST1LNdAsm_32: Spacing = 1; return ARM::VST1LNd32;
5322
5323  // VST2LN
5324  case ARM::VST2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5325  case ARM::VST2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5326  case ARM::VST2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5327  case ARM::VST2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5328  case ARM::VST2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5329
5330  case ARM::VST2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST2LNd8_UPD;
5331  case ARM::VST2LNdWB_register_Asm_16: Spacing = 1; return ARM::VST2LNd16_UPD;
5332  case ARM::VST2LNdWB_register_Asm_32: Spacing = 1; return ARM::VST2LNd32_UPD;
5333  case ARM::VST2LNqWB_register_Asm_16: Spacing = 2; return ARM::VST2LNq16_UPD;
5334  case ARM::VST2LNqWB_register_Asm_32: Spacing = 2; return ARM::VST2LNq32_UPD;
5335
5336  case ARM::VST2LNdAsm_8:  Spacing = 1; return ARM::VST2LNd8;
5337  case ARM::VST2LNdAsm_16: Spacing = 1; return ARM::VST2LNd16;
5338  case ARM::VST2LNdAsm_32: Spacing = 1; return ARM::VST2LNd32;
5339  case ARM::VST2LNqAsm_16: Spacing = 2; return ARM::VST2LNq16;
5340  case ARM::VST2LNqAsm_32: Spacing = 2; return ARM::VST2LNq32;
5341
5342  // VST3LN
5343  case ARM::VST3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5344  case ARM::VST3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5345  case ARM::VST3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5346  case ARM::VST3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5347  case ARM::VST3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5348  case ARM::VST3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST3LNd8_UPD;
5349  case ARM::VST3LNdWB_register_Asm_16: Spacing = 1; return ARM::VST3LNd16_UPD;
5350  case ARM::VST3LNdWB_register_Asm_32: Spacing = 1; return ARM::VST3LNd32_UPD;
5351  case ARM::VST3LNqWB_register_Asm_16: Spacing = 2; return ARM::VST3LNq16_UPD;
5352  case ARM::VST3LNqWB_register_Asm_32: Spacing = 2; return ARM::VST3LNq32_UPD;
5353  case ARM::VST3LNdAsm_8:  Spacing = 1; return ARM::VST3LNd8;
5354  case ARM::VST3LNdAsm_16: Spacing = 1; return ARM::VST3LNd16;
5355  case ARM::VST3LNdAsm_32: Spacing = 1; return ARM::VST3LNd32;
5356  case ARM::VST3LNqAsm_16: Spacing = 2; return ARM::VST3LNq16;
5357  case ARM::VST3LNqAsm_32: Spacing = 2; return ARM::VST3LNq32;
5358
5359  // VST3
5360  case ARM::VST3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5361  case ARM::VST3dWB_fixed_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5362  case ARM::VST3dWB_fixed_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5363  case ARM::VST3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5364  case ARM::VST3qWB_fixed_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5365  case ARM::VST3qWB_fixed_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5366  case ARM::VST3dWB_register_Asm_8:  Spacing = 1; return ARM::VST3d8_UPD;
5367  case ARM::VST3dWB_register_Asm_16: Spacing = 1; return ARM::VST3d16_UPD;
5368  case ARM::VST3dWB_register_Asm_32: Spacing = 1; return ARM::VST3d32_UPD;
5369  case ARM::VST3qWB_register_Asm_8:  Spacing = 2; return ARM::VST3q8_UPD;
5370  case ARM::VST3qWB_register_Asm_16: Spacing = 2; return ARM::VST3q16_UPD;
5371  case ARM::VST3qWB_register_Asm_32: Spacing = 2; return ARM::VST3q32_UPD;
5372  case ARM::VST3dAsm_8:  Spacing = 1; return ARM::VST3d8;
5373  case ARM::VST3dAsm_16: Spacing = 1; return ARM::VST3d16;
5374  case ARM::VST3dAsm_32: Spacing = 1; return ARM::VST3d32;
5375  case ARM::VST3qAsm_8:  Spacing = 2; return ARM::VST3q8;
5376  case ARM::VST3qAsm_16: Spacing = 2; return ARM::VST3q16;
5377  case ARM::VST3qAsm_32: Spacing = 2; return ARM::VST3q32;
5378
5379  // VST4LN
5380  case ARM::VST4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5381  case ARM::VST4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5382  case ARM::VST4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5383  case ARM::VST4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5384  case ARM::VST4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5385  case ARM::VST4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VST4LNd8_UPD;
5386  case ARM::VST4LNdWB_register_Asm_16: Spacing = 1; return ARM::VST4LNd16_UPD;
5387  case ARM::VST4LNdWB_register_Asm_32: Spacing = 1; return ARM::VST4LNd32_UPD;
5388  case ARM::VST4LNqWB_register_Asm_16: Spacing = 2; return ARM::VST4LNq16_UPD;
5389  case ARM::VST4LNqWB_register_Asm_32: Spacing = 2; return ARM::VST4LNq32_UPD;
5390  case ARM::VST4LNdAsm_8:  Spacing = 1; return ARM::VST4LNd8;
5391  case ARM::VST4LNdAsm_16: Spacing = 1; return ARM::VST4LNd16;
5392  case ARM::VST4LNdAsm_32: Spacing = 1; return ARM::VST4LNd32;
5393  case ARM::VST4LNqAsm_16: Spacing = 2; return ARM::VST4LNq16;
5394  case ARM::VST4LNqAsm_32: Spacing = 2; return ARM::VST4LNq32;
5395
5396  // VST4
5397  case ARM::VST4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5398  case ARM::VST4dWB_fixed_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5399  case ARM::VST4dWB_fixed_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5400  case ARM::VST4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5401  case ARM::VST4qWB_fixed_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5402  case ARM::VST4qWB_fixed_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5403  case ARM::VST4dWB_register_Asm_8:  Spacing = 1; return ARM::VST4d8_UPD;
5404  case ARM::VST4dWB_register_Asm_16: Spacing = 1; return ARM::VST4d16_UPD;
5405  case ARM::VST4dWB_register_Asm_32: Spacing = 1; return ARM::VST4d32_UPD;
5406  case ARM::VST4qWB_register_Asm_8:  Spacing = 2; return ARM::VST4q8_UPD;
5407  case ARM::VST4qWB_register_Asm_16: Spacing = 2; return ARM::VST4q16_UPD;
5408  case ARM::VST4qWB_register_Asm_32: Spacing = 2; return ARM::VST4q32_UPD;
5409  case ARM::VST4dAsm_8:  Spacing = 1; return ARM::VST4d8;
5410  case ARM::VST4dAsm_16: Spacing = 1; return ARM::VST4d16;
5411  case ARM::VST4dAsm_32: Spacing = 1; return ARM::VST4d32;
5412  case ARM::VST4qAsm_8:  Spacing = 2; return ARM::VST4q8;
5413  case ARM::VST4qAsm_16: Spacing = 2; return ARM::VST4q16;
5414  case ARM::VST4qAsm_32: Spacing = 2; return ARM::VST4q32;
5415  }
5416}
5417
5418static unsigned getRealVLDOpcode(unsigned Opc, unsigned &Spacing) {
5419  switch(Opc) {
5420  default: llvm_unreachable("unexpected opcode!");
5421  // VLD1LN
5422  case ARM::VLD1LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5423  case ARM::VLD1LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5424  case ARM::VLD1LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5425  case ARM::VLD1LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD1LNd8_UPD;
5426  case ARM::VLD1LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD1LNd16_UPD;
5427  case ARM::VLD1LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD1LNd32_UPD;
5428  case ARM::VLD1LNdAsm_8:  Spacing = 1; return ARM::VLD1LNd8;
5429  case ARM::VLD1LNdAsm_16: Spacing = 1; return ARM::VLD1LNd16;
5430  case ARM::VLD1LNdAsm_32: Spacing = 1; return ARM::VLD1LNd32;
5431
5432  // VLD2LN
5433  case ARM::VLD2LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5434  case ARM::VLD2LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5435  case ARM::VLD2LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5436  case ARM::VLD2LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5437  case ARM::VLD2LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5438  case ARM::VLD2LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD2LNd8_UPD;
5439  case ARM::VLD2LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD2LNd16_UPD;
5440  case ARM::VLD2LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD2LNd32_UPD;
5441  case ARM::VLD2LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD2LNq16_UPD;
5442  case ARM::VLD2LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD2LNq32_UPD;
5443  case ARM::VLD2LNdAsm_8:  Spacing = 1; return ARM::VLD2LNd8;
5444  case ARM::VLD2LNdAsm_16: Spacing = 1; return ARM::VLD2LNd16;
5445  case ARM::VLD2LNdAsm_32: Spacing = 1; return ARM::VLD2LNd32;
5446  case ARM::VLD2LNqAsm_16: Spacing = 2; return ARM::VLD2LNq16;
5447  case ARM::VLD2LNqAsm_32: Spacing = 2; return ARM::VLD2LNq32;
5448
5449  // VLD3DUP
5450  case ARM::VLD3DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5451  case ARM::VLD3DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5452  case ARM::VLD3DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5453  case ARM::VLD3DUPqWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3DUPq8_UPD;
5454  case ARM::VLD3DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5455  case ARM::VLD3DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5456  case ARM::VLD3DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3DUPd8_UPD;
5457  case ARM::VLD3DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD3DUPd16_UPD;
5458  case ARM::VLD3DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD3DUPd32_UPD;
5459  case ARM::VLD3DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD3DUPq8_UPD;
5460  case ARM::VLD3DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD3DUPq16_UPD;
5461  case ARM::VLD3DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD3DUPq32_UPD;
5462  case ARM::VLD3DUPdAsm_8:  Spacing = 1; return ARM::VLD3DUPd8;
5463  case ARM::VLD3DUPdAsm_16: Spacing = 1; return ARM::VLD3DUPd16;
5464  case ARM::VLD3DUPdAsm_32: Spacing = 1; return ARM::VLD3DUPd32;
5465  case ARM::VLD3DUPqAsm_8: Spacing = 2; return ARM::VLD3DUPq8;
5466  case ARM::VLD3DUPqAsm_16: Spacing = 2; return ARM::VLD3DUPq16;
5467  case ARM::VLD3DUPqAsm_32: Spacing = 2; return ARM::VLD3DUPq32;
5468
5469  // VLD3LN
5470  case ARM::VLD3LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5471  case ARM::VLD3LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5472  case ARM::VLD3LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5473  case ARM::VLD3LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5474  case ARM::VLD3LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5475  case ARM::VLD3LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD3LNd8_UPD;
5476  case ARM::VLD3LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD3LNd16_UPD;
5477  case ARM::VLD3LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD3LNd32_UPD;
5478  case ARM::VLD3LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD3LNq16_UPD;
5479  case ARM::VLD3LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD3LNq32_UPD;
5480  case ARM::VLD3LNdAsm_8:  Spacing = 1; return ARM::VLD3LNd8;
5481  case ARM::VLD3LNdAsm_16: Spacing = 1; return ARM::VLD3LNd16;
5482  case ARM::VLD3LNdAsm_32: Spacing = 1; return ARM::VLD3LNd32;
5483  case ARM::VLD3LNqAsm_16: Spacing = 2; return ARM::VLD3LNq16;
5484  case ARM::VLD3LNqAsm_32: Spacing = 2; return ARM::VLD3LNq32;
5485
5486  // VLD3
5487  case ARM::VLD3dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5488  case ARM::VLD3dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5489  case ARM::VLD3dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5490  case ARM::VLD3qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5491  case ARM::VLD3qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5492  case ARM::VLD3qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5493  case ARM::VLD3dWB_register_Asm_8:  Spacing = 1; return ARM::VLD3d8_UPD;
5494  case ARM::VLD3dWB_register_Asm_16: Spacing = 1; return ARM::VLD3d16_UPD;
5495  case ARM::VLD3dWB_register_Asm_32: Spacing = 1; return ARM::VLD3d32_UPD;
5496  case ARM::VLD3qWB_register_Asm_8:  Spacing = 2; return ARM::VLD3q8_UPD;
5497  case ARM::VLD3qWB_register_Asm_16: Spacing = 2; return ARM::VLD3q16_UPD;
5498  case ARM::VLD3qWB_register_Asm_32: Spacing = 2; return ARM::VLD3q32_UPD;
5499  case ARM::VLD3dAsm_8:  Spacing = 1; return ARM::VLD3d8;
5500  case ARM::VLD3dAsm_16: Spacing = 1; return ARM::VLD3d16;
5501  case ARM::VLD3dAsm_32: Spacing = 1; return ARM::VLD3d32;
5502  case ARM::VLD3qAsm_8:  Spacing = 2; return ARM::VLD3q8;
5503  case ARM::VLD3qAsm_16: Spacing = 2; return ARM::VLD3q16;
5504  case ARM::VLD3qAsm_32: Spacing = 2; return ARM::VLD3q32;
5505
5506  // VLD4LN
5507  case ARM::VLD4LNdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5508  case ARM::VLD4LNdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5509  case ARM::VLD4LNdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5510  case ARM::VLD4LNqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5511  case ARM::VLD4LNqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5512  case ARM::VLD4LNdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4LNd8_UPD;
5513  case ARM::VLD4LNdWB_register_Asm_16: Spacing = 1; return ARM::VLD4LNd16_UPD;
5514  case ARM::VLD4LNdWB_register_Asm_32: Spacing = 1; return ARM::VLD4LNd32_UPD;
5515  case ARM::VLD4LNqWB_register_Asm_16: Spacing = 2; return ARM::VLD4LNq16_UPD;
5516  case ARM::VLD4LNqWB_register_Asm_32: Spacing = 2; return ARM::VLD4LNq32_UPD;
5517  case ARM::VLD4LNdAsm_8:  Spacing = 1; return ARM::VLD4LNd8;
5518  case ARM::VLD4LNdAsm_16: Spacing = 1; return ARM::VLD4LNd16;
5519  case ARM::VLD4LNdAsm_32: Spacing = 1; return ARM::VLD4LNd32;
5520  case ARM::VLD4LNqAsm_16: Spacing = 2; return ARM::VLD4LNq16;
5521  case ARM::VLD4LNqAsm_32: Spacing = 2; return ARM::VLD4LNq32;
5522
5523  // VLD4DUP
5524  case ARM::VLD4DUPdWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5525  case ARM::VLD4DUPdWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5526  case ARM::VLD4DUPdWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5527  case ARM::VLD4DUPqWB_fixed_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5528  case ARM::VLD4DUPqWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5529  case ARM::VLD4DUPqWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5530  case ARM::VLD4DUPdWB_register_Asm_8:  Spacing = 1; return ARM::VLD4DUPd8_UPD;
5531  case ARM::VLD4DUPdWB_register_Asm_16: Spacing = 1; return ARM::VLD4DUPd16_UPD;
5532  case ARM::VLD4DUPdWB_register_Asm_32: Spacing = 1; return ARM::VLD4DUPd32_UPD;
5533  case ARM::VLD4DUPqWB_register_Asm_8: Spacing = 2; return ARM::VLD4DUPq8_UPD;
5534  case ARM::VLD4DUPqWB_register_Asm_16: Spacing = 2; return ARM::VLD4DUPq16_UPD;
5535  case ARM::VLD4DUPqWB_register_Asm_32: Spacing = 2; return ARM::VLD4DUPq32_UPD;
5536  case ARM::VLD4DUPdAsm_8:  Spacing = 1; return ARM::VLD4DUPd8;
5537  case ARM::VLD4DUPdAsm_16: Spacing = 1; return ARM::VLD4DUPd16;
5538  case ARM::VLD4DUPdAsm_32: Spacing = 1; return ARM::VLD4DUPd32;
5539  case ARM::VLD4DUPqAsm_8: Spacing = 2; return ARM::VLD4DUPq8;
5540  case ARM::VLD4DUPqAsm_16: Spacing = 2; return ARM::VLD4DUPq16;
5541  case ARM::VLD4DUPqAsm_32: Spacing = 2; return ARM::VLD4DUPq32;
5542
5543  // VLD4
5544  case ARM::VLD4dWB_fixed_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5545  case ARM::VLD4dWB_fixed_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5546  case ARM::VLD4dWB_fixed_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5547  case ARM::VLD4qWB_fixed_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5548  case ARM::VLD4qWB_fixed_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5549  case ARM::VLD4qWB_fixed_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5550  case ARM::VLD4dWB_register_Asm_8:  Spacing = 1; return ARM::VLD4d8_UPD;
5551  case ARM::VLD4dWB_register_Asm_16: Spacing = 1; return ARM::VLD4d16_UPD;
5552  case ARM::VLD4dWB_register_Asm_32: Spacing = 1; return ARM::VLD4d32_UPD;
5553  case ARM::VLD4qWB_register_Asm_8:  Spacing = 2; return ARM::VLD4q8_UPD;
5554  case ARM::VLD4qWB_register_Asm_16: Spacing = 2; return ARM::VLD4q16_UPD;
5555  case ARM::VLD4qWB_register_Asm_32: Spacing = 2; return ARM::VLD4q32_UPD;
5556  case ARM::VLD4dAsm_8:  Spacing = 1; return ARM::VLD4d8;
5557  case ARM::VLD4dAsm_16: Spacing = 1; return ARM::VLD4d16;
5558  case ARM::VLD4dAsm_32: Spacing = 1; return ARM::VLD4d32;
5559  case ARM::VLD4qAsm_8:  Spacing = 2; return ARM::VLD4q8;
5560  case ARM::VLD4qAsm_16: Spacing = 2; return ARM::VLD4q16;
5561  case ARM::VLD4qAsm_32: Spacing = 2; return ARM::VLD4q32;
5562  }
5563}
5564
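    // processInstruction - Rewrite pseudo "..._Asm_..." opcodes produced by the
    // matcher into real MC instructions, shuffling operands into the order the
    // target encoding expects. Returns true if Inst was rewritten in place
    // (callers typically re-process a rewritten instruction so individual
    // transformations can chain).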
5565bool ARMAsmParser::
5566processInstruction(MCInst &Inst,
5567                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5568  switch (Inst.getOpcode()) {
5569  // Aliases for alternate PC+imm syntax of LDR instructions.
5570  case ARM::t2LDRpcrel:
5571    Inst.setOpcode(ARM::t2LDRpci);
5572    return true;
5573  case ARM::t2LDRBpcrel:
5574    Inst.setOpcode(ARM::t2LDRBpci);
5575    return true;
5576  case ARM::t2LDRHpcrel:
5577    Inst.setOpcode(ARM::t2LDRHpci);
5578    return true;
5579  case ARM::t2LDRSBpcrel:
5580    Inst.setOpcode(ARM::t2LDRSBpci);
5581    return true;
5582  case ARM::t2LDRSHpcrel:
5583    Inst.setOpcode(ARM::t2LDRSHpci);
5584    return true;
5585  // Handle NEON VST complex aliases.
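      // Each of the "..._Asm" pseudos below carries its operands in source
      // order: (Vd, lane, Rn, alignment[, Rm], predicate). The real
      // single-lane store encodings want the address operands first, so each
      // case rebuilds the MCInst as (Rn_wb, Rn, alignment, Rm, Vd, spaced
      // register(s), lane, predicate), with Rn_wb and Rm omitted for the
      // non-writeback forms.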
5586  case ARM::VST1LNdWB_register_Asm_8:
5587  case ARM::VST1LNdWB_register_Asm_16:
5588  case ARM::VST1LNdWB_register_Asm_32: {
5589    MCInst TmpInst;
5590    // Shuffle the operands around so the lane index operand is in the
5591    // right place.
5592    unsigned Spacing;
5593    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5594    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5595    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5596    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5597    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5598    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5599    TmpInst.addOperand(Inst.getOperand(1)); // lane
5600    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5601    TmpInst.addOperand(Inst.getOperand(6));
5602    Inst = TmpInst;
5603    return true;
5604  }
5605
5606  case ARM::VST2LNdWB_register_Asm_8:
5607  case ARM::VST2LNdWB_register_Asm_16:
5608  case ARM::VST2LNdWB_register_Asm_32:
5609  case ARM::VST2LNqWB_register_Asm_16:
5610  case ARM::VST2LNqWB_register_Asm_32: {
5611    MCInst TmpInst;
5612    // Shuffle the operands around so the lane index operand is in the
5613    // right place.
5614    unsigned Spacing;
5615    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5616    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5617    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5618    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5619    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5620    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5621    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5622                                            Spacing));
5623    TmpInst.addOperand(Inst.getOperand(1)); // lane
5624    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5625    TmpInst.addOperand(Inst.getOperand(6));
5626    Inst = TmpInst;
5627    return true;
5628  }
5629
5630  case ARM::VST3LNdWB_register_Asm_8:
5631  case ARM::VST3LNdWB_register_Asm_16:
5632  case ARM::VST3LNdWB_register_Asm_32:
5633  case ARM::VST3LNqWB_register_Asm_16:
5634  case ARM::VST3LNqWB_register_Asm_32: {
5635    MCInst TmpInst;
5636    // Shuffle the operands around so the lane index operand is in the
5637    // right place.
5638    unsigned Spacing;
5639    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5640    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5641    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5642    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5643    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5644    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5645    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5646                                            Spacing));
5647    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5648                                            Spacing * 2));
5649    TmpInst.addOperand(Inst.getOperand(1)); // lane
5650    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5651    TmpInst.addOperand(Inst.getOperand(6));
5652    Inst = TmpInst;
5653    return true;
5654  }
5655
5656  case ARM::VST4LNdWB_register_Asm_8:
5657  case ARM::VST4LNdWB_register_Asm_16:
5658  case ARM::VST4LNdWB_register_Asm_32:
5659  case ARM::VST4LNqWB_register_Asm_16:
5660  case ARM::VST4LNqWB_register_Asm_32: {
5661    MCInst TmpInst;
5662    // Shuffle the operands around so the lane index operand is in the
5663    // right place.
5664    unsigned Spacing;
5665    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5666    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5667    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5668    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5669    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5670    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5671    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5672                                            Spacing));
5673    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5674                                            Spacing * 2));
5675    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5676                                            Spacing * 3));
5677    TmpInst.addOperand(Inst.getOperand(1)); // lane
5678    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5679    TmpInst.addOperand(Inst.getOperand(6));
5680    Inst = TmpInst;
5681    return true;
5682  }
5683
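      // The "_fixed" writeback forms have no index register: the base is
      // post-incremented by the transfer size, which the MCInst represents
      // with an Rm of register 0 (the "no register" sentinel) via
      // MCOperand::CreateReg(0).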
5684  case ARM::VST1LNdWB_fixed_Asm_8:
5685  case ARM::VST1LNdWB_fixed_Asm_16:
5686  case ARM::VST1LNdWB_fixed_Asm_32: {
5687    MCInst TmpInst;
5688    // Shuffle the operands around so the lane index operand is in the
5689    // right place.
5690    unsigned Spacing;
5691    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5692    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5693    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5694    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5695    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5696    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5697    TmpInst.addOperand(Inst.getOperand(1)); // lane
5698    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5699    TmpInst.addOperand(Inst.getOperand(5));
5700    Inst = TmpInst;
5701    return true;
5702  }
5703
5704  case ARM::VST2LNdWB_fixed_Asm_8:
5705  case ARM::VST2LNdWB_fixed_Asm_16:
5706  case ARM::VST2LNdWB_fixed_Asm_32:
5707  case ARM::VST2LNqWB_fixed_Asm_16:
5708  case ARM::VST2LNqWB_fixed_Asm_32: {
5709    MCInst TmpInst;
5710    // Shuffle the operands around so the lane index operand is in the
5711    // right place.
5712    unsigned Spacing;
5713    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5714    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5715    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5716    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5717    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5718    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5719    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5720                                            Spacing));
5721    TmpInst.addOperand(Inst.getOperand(1)); // lane
5722    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5723    TmpInst.addOperand(Inst.getOperand(5));
5724    Inst = TmpInst;
5725    return true;
5726  }
5727
5728  case ARM::VST3LNdWB_fixed_Asm_8:
5729  case ARM::VST3LNdWB_fixed_Asm_16:
5730  case ARM::VST3LNdWB_fixed_Asm_32:
5731  case ARM::VST3LNqWB_fixed_Asm_16:
5732  case ARM::VST3LNqWB_fixed_Asm_32: {
5733    MCInst TmpInst;
5734    // Shuffle the operands around so the lane index operand is in the
5735    // right place.
5736    unsigned Spacing;
5737    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5738    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5739    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5740    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5741    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5742    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5743    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5744                                            Spacing));
5745    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5746                                            Spacing * 2));
5747    TmpInst.addOperand(Inst.getOperand(1)); // lane
5748    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5749    TmpInst.addOperand(Inst.getOperand(5));
5750    Inst = TmpInst;
5751    return true;
5752  }
5753
5754  case ARM::VST4LNdWB_fixed_Asm_8:
5755  case ARM::VST4LNdWB_fixed_Asm_16:
5756  case ARM::VST4LNdWB_fixed_Asm_32:
5757  case ARM::VST4LNqWB_fixed_Asm_16:
5758  case ARM::VST4LNqWB_fixed_Asm_32: {
5759    MCInst TmpInst;
5760    // Shuffle the operands around so the lane index operand is in the
5761    // right place.
5762    unsigned Spacing;
5763    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5764    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5765    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5766    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5767    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5768    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5769    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5770                                            Spacing));
5771    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5772                                            Spacing * 2));
5773    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5774                                            Spacing * 3));
5775    TmpInst.addOperand(Inst.getOperand(1)); // lane
5776    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5777    TmpInst.addOperand(Inst.getOperand(5));
5778    Inst = TmpInst;
5779    return true;
5780  }
5781
5782  case ARM::VST1LNdAsm_8:
5783  case ARM::VST1LNdAsm_16:
5784  case ARM::VST1LNdAsm_32: {
5785    MCInst TmpInst;
5786    // Shuffle the operands around so the lane index operand is in the
5787    // right place.
5788    unsigned Spacing;
5789    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5790    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5791    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5792    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5793    TmpInst.addOperand(Inst.getOperand(1)); // lane
5794    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5795    TmpInst.addOperand(Inst.getOperand(5));
5796    Inst = TmpInst;
5797    return true;
5798  }
5799
5800  case ARM::VST2LNdAsm_8:
5801  case ARM::VST2LNdAsm_16:
5802  case ARM::VST2LNdAsm_32:
5803  case ARM::VST2LNqAsm_16:
5804  case ARM::VST2LNqAsm_32: {
5805    MCInst TmpInst;
5806    // Shuffle the operands around so the lane index operand is in the
5807    // right place.
5808    unsigned Spacing;
5809    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5810    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5811    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5812    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5813    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5814                                            Spacing));
5815    TmpInst.addOperand(Inst.getOperand(1)); // lane
5816    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5817    TmpInst.addOperand(Inst.getOperand(5));
5818    Inst = TmpInst;
5819    return true;
5820  }
5821
5822  case ARM::VST3LNdAsm_8:
5823  case ARM::VST3LNdAsm_16:
5824  case ARM::VST3LNdAsm_32:
5825  case ARM::VST3LNqAsm_16:
5826  case ARM::VST3LNqAsm_32: {
5827    MCInst TmpInst;
5828    // Shuffle the operands around so the lane index operand is in the
5829    // right place.
5830    unsigned Spacing;
5831    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5832    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5833    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5834    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5835    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5836                                            Spacing));
5837    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5838                                            Spacing * 2));
5839    TmpInst.addOperand(Inst.getOperand(1)); // lane
5840    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5841    TmpInst.addOperand(Inst.getOperand(5));
5842    Inst = TmpInst;
5843    return true;
5844  }
5845
5846  case ARM::VST4LNdAsm_8:
5847  case ARM::VST4LNdAsm_16:
5848  case ARM::VST4LNdAsm_32:
5849  case ARM::VST4LNqAsm_16:
5850  case ARM::VST4LNqAsm_32: {
5851    MCInst TmpInst;
5852    // Shuffle the operands around so the lane index operand is in the
5853    // right place.
5854    unsigned Spacing;
5855    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
5856    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5857    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5858    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5859    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5860                                            Spacing));
5861    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5862                                            Spacing * 2));
5863    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5864                                            Spacing * 3));
5865    TmpInst.addOperand(Inst.getOperand(1)); // lane
5866    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5867    TmpInst.addOperand(Inst.getOperand(5));
5868    Inst = TmpInst;
5869    return true;
5870  }
5871
5872  // Handle NEON VLD complex aliases.
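      // The VLDnLN pseudos are handled like the stores above, except that the
      // destination register(s) come first and a tied source operand (the old
      // value of Vd) follows the address operands, so the lanes that aren't
      // loaded keep their previous contents.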
5873  case ARM::VLD1LNdWB_register_Asm_8:
5874  case ARM::VLD1LNdWB_register_Asm_16:
5875  case ARM::VLD1LNdWB_register_Asm_32: {
5876    MCInst TmpInst;
5877    // Shuffle the operands around so the lane index operand is in the
5878    // right place.
5879    unsigned Spacing;
5880    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5881    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5882    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5883    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5884    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5885    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5886    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5887    TmpInst.addOperand(Inst.getOperand(1)); // lane
5888    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5889    TmpInst.addOperand(Inst.getOperand(6));
5890    Inst = TmpInst;
5891    return true;
5892  }
5893
5894  case ARM::VLD2LNdWB_register_Asm_8:
5895  case ARM::VLD2LNdWB_register_Asm_16:
5896  case ARM::VLD2LNdWB_register_Asm_32:
5897  case ARM::VLD2LNqWB_register_Asm_16:
5898  case ARM::VLD2LNqWB_register_Asm_32: {
5899    MCInst TmpInst;
5900    // Shuffle the operands around so the lane index operand is in the
5901    // right place.
5902    unsigned Spacing;
5903    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5904    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5905    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5906                                            Spacing));
5907    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5908    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5909    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5910    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5911    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5912    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5913                                            Spacing));
5914    TmpInst.addOperand(Inst.getOperand(1)); // lane
5915    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5916    TmpInst.addOperand(Inst.getOperand(6));
5917    Inst = TmpInst;
5918    return true;
5919  }
5920
5921  case ARM::VLD3LNdWB_register_Asm_8:
5922  case ARM::VLD3LNdWB_register_Asm_16:
5923  case ARM::VLD3LNdWB_register_Asm_32:
5924  case ARM::VLD3LNqWB_register_Asm_16:
5925  case ARM::VLD3LNqWB_register_Asm_32: {
5926    MCInst TmpInst;
5927    // Shuffle the operands around so the lane index operand is in the
5928    // right place.
5929    unsigned Spacing;
5930    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5931    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5932    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5933                                            Spacing));
5934    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5935                                            Spacing * 2));
5936    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5937    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5938    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5939    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5940    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5941    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5942                                            Spacing));
5943    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5944                                            Spacing * 2));
5945    TmpInst.addOperand(Inst.getOperand(1)); // lane
5946    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5947    TmpInst.addOperand(Inst.getOperand(6));
5948    Inst = TmpInst;
5949    return true;
5950  }
5951
5952  case ARM::VLD4LNdWB_register_Asm_8:
5953  case ARM::VLD4LNdWB_register_Asm_16:
5954  case ARM::VLD4LNdWB_register_Asm_32:
5955  case ARM::VLD4LNqWB_register_Asm_16:
5956  case ARM::VLD4LNqWB_register_Asm_32: {
5957    MCInst TmpInst;
5958    // Shuffle the operands around so the lane index operand is in the
5959    // right place.
5960    unsigned Spacing;
5961    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5962    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5963    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5964                                            Spacing));
5965    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5966                                            Spacing * 2));
5967    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5968                                            Spacing * 3));
5969    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5970    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5971    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5972    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5973    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5974    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5975                                            Spacing));
5976    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5977                                            Spacing * 2));
5978    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
5979                                            Spacing * 3));
5980    TmpInst.addOperand(Inst.getOperand(1)); // lane
5981    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5982    TmpInst.addOperand(Inst.getOperand(6));
5983    Inst = TmpInst;
5984    return true;
5985  }
5986
5987  case ARM::VLD1LNdWB_fixed_Asm_8:
5988  case ARM::VLD1LNdWB_fixed_Asm_16:
5989  case ARM::VLD1LNdWB_fixed_Asm_32: {
5990    MCInst TmpInst;
5991    // Shuffle the operands around so the lane index operand is in the
5992    // right place.
5993    unsigned Spacing;
5994    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
5995    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5996    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5997    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5998    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5999    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6000    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6001    TmpInst.addOperand(Inst.getOperand(1)); // lane
6002    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6003    TmpInst.addOperand(Inst.getOperand(5));
6004    Inst = TmpInst;
6005    return true;
6006  }
6007
6008  case ARM::VLD2LNdWB_fixed_Asm_8:
6009  case ARM::VLD2LNdWB_fixed_Asm_16:
6010  case ARM::VLD2LNdWB_fixed_Asm_32:
6011  case ARM::VLD2LNqWB_fixed_Asm_16:
6012  case ARM::VLD2LNqWB_fixed_Asm_32: {
6013    MCInst TmpInst;
6014    // Shuffle the operands around so the lane index operand is in the
6015    // right place.
6016    unsigned Spacing;
6017    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6018    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6019    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6020                                            Spacing));
6021    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6022    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6023    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6024    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6025    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6026    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6027                                            Spacing));
6028    TmpInst.addOperand(Inst.getOperand(1)); // lane
6029    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6030    TmpInst.addOperand(Inst.getOperand(5));
6031    Inst = TmpInst;
6032    return true;
6033  }
6034
6035  case ARM::VLD3LNdWB_fixed_Asm_8:
6036  case ARM::VLD3LNdWB_fixed_Asm_16:
6037  case ARM::VLD3LNdWB_fixed_Asm_32:
6038  case ARM::VLD3LNqWB_fixed_Asm_16:
6039  case ARM::VLD3LNqWB_fixed_Asm_32: {
6040    MCInst TmpInst;
6041    // Shuffle the operands around so the lane index operand is in the
6042    // right place.
6043    unsigned Spacing;
6044    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6045    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6046    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6047                                            Spacing));
6048    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6049                                            Spacing * 2));
6050    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6051    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6052    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6053    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6054    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6055    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6056                                            Spacing));
6057    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6058                                            Spacing * 2));
6059    TmpInst.addOperand(Inst.getOperand(1)); // lane
6060    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6061    TmpInst.addOperand(Inst.getOperand(5));
6062    Inst = TmpInst;
6063    return true;
6064  }
6065
6066  case ARM::VLD4LNdWB_fixed_Asm_8:
6067  case ARM::VLD4LNdWB_fixed_Asm_16:
6068  case ARM::VLD4LNdWB_fixed_Asm_32:
6069  case ARM::VLD4LNqWB_fixed_Asm_16:
6070  case ARM::VLD4LNqWB_fixed_Asm_32: {
6071    MCInst TmpInst;
6072    // Shuffle the operands around so the lane index operand is in the
6073    // right place.
6074    unsigned Spacing;
6075    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6076    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6077    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6078                                            Spacing));
6079    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6080                                            Spacing * 2));
6081    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6082                                            Spacing * 3));
6083    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
6084    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6085    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6086    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6087    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6088    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6089                                            Spacing));
6090    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6091                                            Spacing * 2));
6092    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6093                                            Spacing * 3));
6094    TmpInst.addOperand(Inst.getOperand(1)); // lane
6095    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6096    TmpInst.addOperand(Inst.getOperand(5));
6097    Inst = TmpInst;
6098    return true;
6099  }
6100
6101  case ARM::VLD1LNdAsm_8:
6102  case ARM::VLD1LNdAsm_16:
6103  case ARM::VLD1LNdAsm_32: {
6104    MCInst TmpInst;
6105    // Shuffle the operands around so the lane index operand is in the
6106    // right place.
6107    unsigned Spacing;
6108    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6109    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6110    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6111    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6112    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6113    TmpInst.addOperand(Inst.getOperand(1)); // lane
6114    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6115    TmpInst.addOperand(Inst.getOperand(5));
6116    Inst = TmpInst;
6117    return true;
6118  }
6119
6120  case ARM::VLD2LNdAsm_8:
6121  case ARM::VLD2LNdAsm_16:
6122  case ARM::VLD2LNdAsm_32:
6123  case ARM::VLD2LNqAsm_16:
6124  case ARM::VLD2LNqAsm_32: {
6125    MCInst TmpInst;
6126    // Shuffle the operands around so the lane index operand is in the
6127    // right place.
6128    unsigned Spacing;
6129    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6130    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6131    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6132                                            Spacing));
6133    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6134    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6135    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6136    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6137                                            Spacing));
6138    TmpInst.addOperand(Inst.getOperand(1)); // lane
6139    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6140    TmpInst.addOperand(Inst.getOperand(5));
6141    Inst = TmpInst;
6142    return true;
6143  }
6144
6145  case ARM::VLD3LNdAsm_8:
6146  case ARM::VLD3LNdAsm_16:
6147  case ARM::VLD3LNdAsm_32:
6148  case ARM::VLD3LNqAsm_16:
6149  case ARM::VLD3LNqAsm_32: {
6150    MCInst TmpInst;
6151    // Shuffle the operands around so the lane index operand is in the
6152    // right place.
6153    unsigned Spacing;
6154    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6155    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6156    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6157                                            Spacing));
6158    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6159                                            Spacing * 2));
6160    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6161    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6162    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6163    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6164                                            Spacing));
6165    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6166                                            Spacing * 2));
6167    TmpInst.addOperand(Inst.getOperand(1)); // lane
6168    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6169    TmpInst.addOperand(Inst.getOperand(5));
6170    Inst = TmpInst;
6171    return true;
6172  }
6173
6174  case ARM::VLD4LNdAsm_8:
6175  case ARM::VLD4LNdAsm_16:
6176  case ARM::VLD4LNdAsm_32:
6177  case ARM::VLD4LNqAsm_16:
6178  case ARM::VLD4LNqAsm_32: {
6179    MCInst TmpInst;
6180    // Shuffle the operands around so the lane index operand is in the
6181    // right place.
6182    unsigned Spacing;
6183    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6184    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6185    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6186                                            Spacing));
6187    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6188                                            Spacing * 2));
6189    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6190                                            Spacing * 3));
6191    TmpInst.addOperand(Inst.getOperand(2)); // Rn
6192    TmpInst.addOperand(Inst.getOperand(3)); // alignment
6193    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
6194    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6195                                            Spacing));
6196    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6197                                            Spacing * 2));
6198    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6199                                            Spacing * 3));
6200    TmpInst.addOperand(Inst.getOperand(1)); // lane
6201    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6202    TmpInst.addOperand(Inst.getOperand(5));
6203    Inst = TmpInst;
6204    return true;
6205  }
6206
6207  // VLD3DUP single 3-element structure to all lanes instructions.
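      // For the all-lanes (DUP) and multiple-structure pseudos the register
      // list appears as a single operand (its first D register); the cases
      // below materialize the remaining list entries as Vd + Spacing,
      // Vd + Spacing * 2, etc., which assumes the D registers are numbered
      // consecutively in the register enum.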
6208  case ARM::VLD3DUPdAsm_8:
6209  case ARM::VLD3DUPdAsm_16:
6210  case ARM::VLD3DUPdAsm_32:
6211  case ARM::VLD3DUPqAsm_8:
6212  case ARM::VLD3DUPqAsm_16:
6213  case ARM::VLD3DUPqAsm_32: {
6214    MCInst TmpInst;
6215    unsigned Spacing;
6216    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6217    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6218    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6219                                            Spacing));
6220    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6221                                            Spacing * 2));
6222    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6223    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6224    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6225    TmpInst.addOperand(Inst.getOperand(4));
6226    Inst = TmpInst;
6227    return true;
6228  }
6229
6230  case ARM::VLD3DUPdWB_fixed_Asm_8:
6231  case ARM::VLD3DUPdWB_fixed_Asm_16:
6232  case ARM::VLD3DUPdWB_fixed_Asm_32:
6233  case ARM::VLD3DUPqWB_fixed_Asm_8:
6234  case ARM::VLD3DUPqWB_fixed_Asm_16:
6235  case ARM::VLD3DUPqWB_fixed_Asm_32: {
6236    MCInst TmpInst;
6237    unsigned Spacing;
6238    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6239    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6240    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6241                                            Spacing));
6242    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6243                                            Spacing * 2));
6244    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6245    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6246    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6247    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6248    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6249    TmpInst.addOperand(Inst.getOperand(4));
6250    Inst = TmpInst;
6251    return true;
6252  }
6253
6254  case ARM::VLD3DUPdWB_register_Asm_8:
6255  case ARM::VLD3DUPdWB_register_Asm_16:
6256  case ARM::VLD3DUPdWB_register_Asm_32:
6257  case ARM::VLD3DUPqWB_register_Asm_8:
6258  case ARM::VLD3DUPqWB_register_Asm_16:
6259  case ARM::VLD3DUPqWB_register_Asm_32: {
6260    MCInst TmpInst;
6261    unsigned Spacing;
6262    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6263    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6264    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6265                                            Spacing));
6266    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6267                                            Spacing * 2));
6268    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6269    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6270    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6271    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6272    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6273    TmpInst.addOperand(Inst.getOperand(5));
6274    Inst = TmpInst;
6275    return true;
6276  }
6277
6278  // VLD3 multiple 3-element structure instructions.
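      // The multiple-structure loads have no lane operand and no tied source;
      // every register in the list is written in full.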
6279  case ARM::VLD3dAsm_8:
6280  case ARM::VLD3dAsm_16:
6281  case ARM::VLD3dAsm_32:
6282  case ARM::VLD3qAsm_8:
6283  case ARM::VLD3qAsm_16:
6284  case ARM::VLD3qAsm_32: {
6285    MCInst TmpInst;
6286    unsigned Spacing;
6287    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6288    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6289    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6290                                            Spacing));
6291    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6292                                            Spacing * 2));
6293    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6294    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6295    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6296    TmpInst.addOperand(Inst.getOperand(4));
6297    Inst = TmpInst;
6298    return true;
6299  }
6300
6301  case ARM::VLD3dWB_fixed_Asm_8:
6302  case ARM::VLD3dWB_fixed_Asm_16:
6303  case ARM::VLD3dWB_fixed_Asm_32:
6304  case ARM::VLD3qWB_fixed_Asm_8:
6305  case ARM::VLD3qWB_fixed_Asm_16:
6306  case ARM::VLD3qWB_fixed_Asm_32: {
6307    MCInst TmpInst;
6308    unsigned Spacing;
6309    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6310    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6311    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6312                                            Spacing));
6313    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6314                                            Spacing * 2));
6315    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6316    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6317    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6318    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6319    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6320    TmpInst.addOperand(Inst.getOperand(4));
6321    Inst = TmpInst;
6322    return true;
6323  }
6324
6325  case ARM::VLD3dWB_register_Asm_8:
6326  case ARM::VLD3dWB_register_Asm_16:
6327  case ARM::VLD3dWB_register_Asm_32:
6328  case ARM::VLD3qWB_register_Asm_8:
6329  case ARM::VLD3qWB_register_Asm_16:
6330  case ARM::VLD3qWB_register_Asm_32: {
6331    MCInst TmpInst;
6332    unsigned Spacing;
6333    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6334    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6335    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6336                                            Spacing));
6337    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6338                                            Spacing * 2));
6339    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6340    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6341    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6342    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6343    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6344    TmpInst.addOperand(Inst.getOperand(5));
6345    Inst = TmpInst;
6346    return true;
6347  }
6348
6349  // VLD4DUP single 4-element structure to all lanes instructions.
6350  case ARM::VLD4DUPdAsm_8:
6351  case ARM::VLD4DUPdAsm_16:
6352  case ARM::VLD4DUPdAsm_32:
6353  case ARM::VLD4DUPqAsm_8:
6354  case ARM::VLD4DUPqAsm_16:
6355  case ARM::VLD4DUPqAsm_32: {
6356    MCInst TmpInst;
6357    unsigned Spacing;
6358    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6359    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6360    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6361                                            Spacing));
6362    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6363                                            Spacing * 2));
6364    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6365                                            Spacing * 3));
6366    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6367    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6368    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6369    TmpInst.addOperand(Inst.getOperand(4));
6370    Inst = TmpInst;
6371    return true;
6372  }
6373
6374  case ARM::VLD4DUPdWB_fixed_Asm_8:
6375  case ARM::VLD4DUPdWB_fixed_Asm_16:
6376  case ARM::VLD4DUPdWB_fixed_Asm_32:
6377  case ARM::VLD4DUPqWB_fixed_Asm_8:
6378  case ARM::VLD4DUPqWB_fixed_Asm_16:
6379  case ARM::VLD4DUPqWB_fixed_Asm_32: {
6380    MCInst TmpInst;
6381    unsigned Spacing;
6382    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6383    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6384    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6385                                            Spacing));
6386    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6387                                            Spacing * 2));
6388    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6389                                            Spacing * 3));
6390    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6391    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6392    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6393    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6394    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6395    TmpInst.addOperand(Inst.getOperand(4));
6396    Inst = TmpInst;
6397    return true;
6398  }
6399
6400  case ARM::VLD4DUPdWB_register_Asm_8:
6401  case ARM::VLD4DUPdWB_register_Asm_16:
6402  case ARM::VLD4DUPdWB_register_Asm_32:
6403  case ARM::VLD4DUPqWB_register_Asm_8:
6404  case ARM::VLD4DUPqWB_register_Asm_16:
6405  case ARM::VLD4DUPqWB_register_Asm_32: {
6406    MCInst TmpInst;
6407    unsigned Spacing;
6408    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6409    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6410    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6411                                            Spacing));
6412    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6413                                            Spacing * 2));
6414    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6415                                            Spacing * 3));
6416    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6417    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6418    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6419    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6420    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6421    TmpInst.addOperand(Inst.getOperand(5));
6422    Inst = TmpInst;
6423    return true;
6424  }
6425
6426  // VLD4 multiple 4-element structure instructions.
6427  case ARM::VLD4dAsm_8:
6428  case ARM::VLD4dAsm_16:
6429  case ARM::VLD4dAsm_32:
6430  case ARM::VLD4qAsm_8:
6431  case ARM::VLD4qAsm_16:
6432  case ARM::VLD4qAsm_32: {
6433    MCInst TmpInst;
6434    unsigned Spacing;
6435    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6436    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6437    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6438                                            Spacing));
6439    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6440                                            Spacing * 2));
6441    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6442                                            Spacing * 3));
6443    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6444    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6445    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6446    TmpInst.addOperand(Inst.getOperand(4));
6447    Inst = TmpInst;
6448    return true;
6449  }
6450
6451  case ARM::VLD4dWB_fixed_Asm_8:
6452  case ARM::VLD4dWB_fixed_Asm_16:
6453  case ARM::VLD4dWB_fixed_Asm_32:
6454  case ARM::VLD4qWB_fixed_Asm_8:
6455  case ARM::VLD4qWB_fixed_Asm_16:
6456  case ARM::VLD4qWB_fixed_Asm_32: {
6457    MCInst TmpInst;
6458    unsigned Spacing;
6459    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6460    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6461    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6462                                            Spacing));
6463    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6464                                            Spacing * 2));
6465    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6466                                            Spacing * 3));
6467    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6468    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6469    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6470    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6471    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6472    TmpInst.addOperand(Inst.getOperand(4));
6473    Inst = TmpInst;
6474    return true;
6475  }
6476
6477  case ARM::VLD4dWB_register_Asm_8:
6478  case ARM::VLD4dWB_register_Asm_16:
6479  case ARM::VLD4dWB_register_Asm_32:
6480  case ARM::VLD4qWB_register_Asm_8:
6481  case ARM::VLD4qWB_register_Asm_16:
6482  case ARM::VLD4qWB_register_Asm_32: {
6483    MCInst TmpInst;
6484    unsigned Spacing;
6485    TmpInst.setOpcode(getRealVLDOpcode(Inst.getOpcode(), Spacing));
6486    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6487    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6488                                            Spacing));
6489    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6490                                            Spacing * 2));
6491    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6492                                            Spacing * 3));
6493    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6494    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6495    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6496    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6497    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6498    TmpInst.addOperand(Inst.getOperand(5));
6499    Inst = TmpInst;
6500    return true;
6501  }
6502
6503  // VST3 multiple 3-element structure instructions.
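      // The multiple-structure stores mirror the loads above: address operands
      // first, then the expanded register list, with no lane or tied operand.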
6504  case ARM::VST3dAsm_8:
6505  case ARM::VST3dAsm_16:
6506  case ARM::VST3dAsm_32:
6507  case ARM::VST3qAsm_8:
6508  case ARM::VST3qAsm_16:
6509  case ARM::VST3qAsm_32: {
6510    MCInst TmpInst;
6511    unsigned Spacing;
6512    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6513    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6514    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6515    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6516    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6517                                            Spacing));
6518    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6519                                            Spacing * 2));
6520    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6521    TmpInst.addOperand(Inst.getOperand(4));
6522    Inst = TmpInst;
6523    return true;
6524  }
6525
6526  case ARM::VST3dWB_fixed_Asm_8:
6527  case ARM::VST3dWB_fixed_Asm_16:
6528  case ARM::VST3dWB_fixed_Asm_32:
6529  case ARM::VST3qWB_fixed_Asm_8:
6530  case ARM::VST3qWB_fixed_Asm_16:
6531  case ARM::VST3qWB_fixed_Asm_32: {
6532    MCInst TmpInst;
6533    unsigned Spacing;
6534    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6535    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6536    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6537    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6538    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6539    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6540    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6541                                            Spacing));
6542    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6543                                            Spacing * 2));
6544    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6545    TmpInst.addOperand(Inst.getOperand(4));
6546    Inst = TmpInst;
6547    return true;
6548  }
6549
6550  case ARM::VST3dWB_register_Asm_8:
6551  case ARM::VST3dWB_register_Asm_16:
6552  case ARM::VST3dWB_register_Asm_32:
6553  case ARM::VST3qWB_register_Asm_8:
6554  case ARM::VST3qWB_register_Asm_16:
6555  case ARM::VST3qWB_register_Asm_32: {
6556    MCInst TmpInst;
6557    unsigned Spacing;
6558    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6559    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6560    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6561    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6562    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6563    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6564    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6565                                            Spacing));
6566    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6567                                            Spacing * 2));
6568    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6569    TmpInst.addOperand(Inst.getOperand(5));
6570    Inst = TmpInst;
6571    return true;
6572  }
6573
6574  // VST4 multiple 4-element structure instructions.
6575  case ARM::VST4dAsm_8:
6576  case ARM::VST4dAsm_16:
6577  case ARM::VST4dAsm_32:
6578  case ARM::VST4qAsm_8:
6579  case ARM::VST4qAsm_16:
6580  case ARM::VST4qAsm_32: {
6581    MCInst TmpInst;
6582    unsigned Spacing;
6583    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6584    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6585    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6586    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6587    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6588                                            Spacing));
6589    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6590                                            Spacing * 2));
6591    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6592                                            Spacing * 3));
6593    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6594    TmpInst.addOperand(Inst.getOperand(4));
6595    Inst = TmpInst;
6596    return true;
6597  }
6598
6599  case ARM::VST4dWB_fixed_Asm_8:
6600  case ARM::VST4dWB_fixed_Asm_16:
6601  case ARM::VST4dWB_fixed_Asm_32:
6602  case ARM::VST4qWB_fixed_Asm_8:
6603  case ARM::VST4qWB_fixed_Asm_16:
6604  case ARM::VST4qWB_fixed_Asm_32: {
6605    MCInst TmpInst;
6606    unsigned Spacing;
6607    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6608    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6609    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6610    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6611    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
6612    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6613    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6614                                            Spacing));
6615    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6616                                            Spacing * 2));
6617    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6618                                            Spacing * 3));
6619    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6620    TmpInst.addOperand(Inst.getOperand(4));
6621    Inst = TmpInst;
6622    return true;
6623  }
6624
6625  case ARM::VST4dWB_register_Asm_8:
6626  case ARM::VST4dWB_register_Asm_16:
6627  case ARM::VST4dWB_register_Asm_32:
6628  case ARM::VST4qWB_register_Asm_8:
6629  case ARM::VST4qWB_register_Asm_16:
6630  case ARM::VST4qWB_register_Asm_32: {
6631    MCInst TmpInst;
6632    unsigned Spacing;
6633    TmpInst.setOpcode(getRealVSTOpcode(Inst.getOpcode(), Spacing));
6634    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6635    TmpInst.addOperand(Inst.getOperand(1)); // Rn_wb == tied Rn
6636    TmpInst.addOperand(Inst.getOperand(2)); // alignment
6637    TmpInst.addOperand(Inst.getOperand(3)); // Rm
6638    TmpInst.addOperand(Inst.getOperand(0)); // Vd
6639    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6640                                            Spacing));
6641    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6642                                            Spacing * 2));
6643    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg() +
6644                                            Spacing * 3));
6645    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6646    TmpInst.addOperand(Inst.getOperand(5));
6647    Inst = TmpInst;
6648    return true;
6649  }
6650
6651  // Handle the Thumb2 mode MOV complex aliases.
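      // For example (roughly): "movs r0, r0, lsl r1" outside an IT block can
      // use the narrow 16-bit tLSLrr encoding, whereas "mov r0, r1, lsl r2"
      // needs the wide 32-bit t2LSLrr form.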
6652  case ARM::t2MOVsr:
6653  case ARM::t2MOVSsr: {
6654    // Which instruction to expand to depends on the CCOut operand and,
6655    // when the register operands are all low registers, on whether we're
6656    // inside an IT block.
6657    bool isNarrow = false;
6658    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6659        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6660        isARMLowRegister(Inst.getOperand(2).getReg()) &&
6661        Inst.getOperand(0).getReg() == Inst.getOperand(1).getReg() &&
6662        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsr))
6663      isNarrow = true;
6664    MCInst TmpInst;
6665    unsigned newOpc;
6666    switch(ARM_AM::getSORegShOp(Inst.getOperand(3).getImm())) {
6667    default: llvm_unreachable("unexpected opcode!");
6668    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRrr : ARM::t2ASRrr; break;
6669    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRrr : ARM::t2LSRrr; break;
6670    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLrr : ARM::t2LSLrr; break;
6671    case ARM_AM::ror: newOpc = isNarrow ? ARM::tROR   : ARM::t2RORrr; break;
6672    }
6673    TmpInst.setOpcode(newOpc);
6674    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6675    if (isNarrow)
6676      TmpInst.addOperand(MCOperand::CreateReg(
6677          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6678    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6679    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6680    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
6681    TmpInst.addOperand(Inst.getOperand(5));
6682    if (!isNarrow)
6683      TmpInst.addOperand(MCOperand::CreateReg(
6684          Inst.getOpcode() == ARM::t2MOVSsr ? ARM::CPSR : 0));
6685    Inst = TmpInst;
6686    return true;
6687  }
6688  case ARM::t2MOVsi:
6689  case ARM::t2MOVSsi: {
6690    // Which instruction to expand to depends on the CCOut operand and,
6691    // if the register operands are low registers, whether we're in an
6692    // IT block.
6693    bool isNarrow = false;
6694    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
6695        isARMLowRegister(Inst.getOperand(1).getReg()) &&
6696        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
6697      isNarrow = true;
6698    MCInst TmpInst;
6699    unsigned newOpc;
6700    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
6701    default: llvm_unreachable("unexpected opcode!");
6702    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
6703    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
6704    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
6705    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
6706    case ARM_AM::rrx: isNarrow = false; newOpc = ARM::t2RRX; break;
6707    }
6708    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
6709    if (Amount == 32) Amount = 0;
6710    TmpInst.setOpcode(newOpc);
6711    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6712    if (isNarrow)
6713      TmpInst.addOperand(MCOperand::CreateReg(
6714          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6715    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6716    if (newOpc != ARM::t2RRX)
6717      TmpInst.addOperand(MCOperand::CreateImm(Amount));
6718    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6719    TmpInst.addOperand(Inst.getOperand(4));
6720    if (!isNarrow)
6721      TmpInst.addOperand(MCOperand::CreateReg(
6722          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
6723    Inst = TmpInst;
6724    return true;
6725  }
6726  // Handle the ARM mode MOV complex aliases.
6727  case ARM::ASRr:
6728  case ARM::LSRr:
6729  case ARM::LSLr:
6730  case ARM::RORr: {
6731    ARM_AM::ShiftOpc ShiftTy;
6732    switch(Inst.getOpcode()) {
6733    default: llvm_unreachable("unexpected opcode!");
6734    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
6735    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
6736    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
6737    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
6738    }
6739    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
6740    MCInst TmpInst;
6741    TmpInst.setOpcode(ARM::MOVsr);
6742    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6743    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6744    TmpInst.addOperand(Inst.getOperand(2)); // Rm
6745    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6746    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6747    TmpInst.addOperand(Inst.getOperand(4));
6748    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6749    Inst = TmpInst;
6750    return true;
6751  }
6752  case ARM::ASRi:
6753  case ARM::LSRi:
6754  case ARM::LSLi:
6755  case ARM::RORi: {
6756    ARM_AM::ShiftOpc ShiftTy;
6757    switch(Inst.getOpcode()) {
6758    default: llvm_unreachable("unexpected opcode!");
6759    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
6760    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
6761    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
6762    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
6763    }
6764    // A shift by zero is a plain MOVr, not a MOVsi.
6765    unsigned Amt = Inst.getOperand(2).getImm();
6766    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
6767    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
6768    MCInst TmpInst;
6769    TmpInst.setOpcode(Opc);
6770    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6771    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6772    if (Opc == ARM::MOVsi)
6773      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6774    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
6775    TmpInst.addOperand(Inst.getOperand(4));
6776    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
6777    Inst = TmpInst;
6778    return true;
6779  }
6780  case ARM::RRXi: {
6781    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
6782    MCInst TmpInst;
6783    TmpInst.setOpcode(ARM::MOVsi);
6784    TmpInst.addOperand(Inst.getOperand(0)); // Rd
6785    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6786    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
6787    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6788    TmpInst.addOperand(Inst.getOperand(3));
6789    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
6790    Inst = TmpInst;
6791    return true;
6792  }
6793  case ARM::t2LDMIA_UPD: {
6794    // If this is a load of a single register, then we should use
6795    // a post-indexed LDR instruction instead, per the ARM ARM.
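    // For example, "ldmia r3!, {r2}" is emitted as "ldr r2, [r3], #4".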
6796    if (Inst.getNumOperands() != 5)
6797      return false;
6798    MCInst TmpInst;
6799    TmpInst.setOpcode(ARM::t2LDR_POST);
6800    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6801    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6802    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6803    TmpInst.addOperand(MCOperand::CreateImm(4));
6804    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6805    TmpInst.addOperand(Inst.getOperand(3));
6806    Inst = TmpInst;
6807    return true;
6808  }
6809  case ARM::t2STMDB_UPD: {
6810    // If this is a store of a single register, then we should use
6811    // a pre-indexed STR instruction instead, per the ARM ARM.
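    // For example, "stmdb r3!, {r2}" is emitted as "str r2, [r3, #-4]!".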
6812    if (Inst.getNumOperands() != 5)
6813      return false;
6814    MCInst TmpInst;
6815    TmpInst.setOpcode(ARM::t2STR_PRE);
6816    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6817    TmpInst.addOperand(Inst.getOperand(4)); // Rt
6818    TmpInst.addOperand(Inst.getOperand(1)); // Rn
6819    TmpInst.addOperand(MCOperand::CreateImm(-4));
6820    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6821    TmpInst.addOperand(Inst.getOperand(3));
6822    Inst = TmpInst;
6823    return true;
6824  }
6825  case ARM::LDMIA_UPD:
6826    // If this is a load of a single register via a 'pop', then we should use
6827    // a post-indexed LDR instruction instead, per the ARM ARM.
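      // For example, "pop {r2}" is emitted as "ldr r2, [sp], #4".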
6828    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
6829        Inst.getNumOperands() == 5) {
6830      MCInst TmpInst;
6831      TmpInst.setOpcode(ARM::LDR_POST_IMM);
6832      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6833      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6834      TmpInst.addOperand(Inst.getOperand(1)); // Rn
6835      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
6836      TmpInst.addOperand(MCOperand::CreateImm(4));
6837      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6838      TmpInst.addOperand(Inst.getOperand(3));
6839      Inst = TmpInst;
6840      return true;
6841    }
6842    break;
6843  case ARM::STMDB_UPD:
6844    // If this is a store of a single register via a 'push', then we should use
6845    // a pre-indexed STR instruction instead, per the ARM ARM.
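      // For example, "push {r2}" is emitted as "str r2, [sp, #-4]!".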
6846    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
6847        Inst.getNumOperands() == 5) {
6848      MCInst TmpInst;
6849      TmpInst.setOpcode(ARM::STR_PRE_IMM);
6850      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
6851      TmpInst.addOperand(Inst.getOperand(4)); // Rt
6852      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
6853      TmpInst.addOperand(MCOperand::CreateImm(-4));
6854      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
6855      TmpInst.addOperand(Inst.getOperand(3));
6856      Inst = TmpInst;
6857    }
6858    break;
6859  case ARM::t2ADDri12:
6860    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
6861    // mnemonic was used (not "addw"), encoding T3 is preferred.
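    // For example, "add r0, r1, #0xff0" can use t2ADDri (0xff0 is a valid
    // modified immediate), while "add r0, r1, #0xabc" must keep the
    // 12-bit-immediate encoding.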
6862    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
6863        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6864      break;
6865    Inst.setOpcode(ARM::t2ADDri);
6866    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6867    break;
6868  case ARM::t2SUBri12:
6869    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
6870    // mnemonic was used (not "subw"), encoding T3 is preferred.
6871    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
6872        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
6873      break;
6874    Inst.setOpcode(ARM::t2SUBri);
6875    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
6876    break;
6877  case ARM::tADDi8:
6878    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
6879    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6880    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6881    // to encoding T1 if <Rd> is omitted."
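    // For example, "adds r2, r2, #5" becomes tADDi3, while "adds r2, #5"
    // (no explicit <Rd>) stays tADDi8.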
6882    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6883      Inst.setOpcode(ARM::tADDi3);
6884      return true;
6885    }
6886    break;
6887  case ARM::tSUBi8:
6888    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
6889    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
6890    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
6891    // to encoding T1 if <Rd> is omitted."
6892    if ((unsigned)Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
6893      Inst.setOpcode(ARM::tSUBi3);
6894      return true;
6895    }
6896    break;
6897  case ARM::t2ADDri:
6898  case ARM::t2SUBri: {
6899    // If the destination and first source operand are the same, and
6900    // the flags are compatible with the current IT status, use encoding T2
6901    // instead of T3. For compatibility with the system 'as'. Make sure the
6902    // wide encoding wasn't explicit.
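    // For example, outside an IT block "adds r1, r1, #200" can use the 16-bit
    // tADDi8 encoding rather than the 32-bit T3 encoding.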
6903    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6904        !isARMLowRegister(Inst.getOperand(0).getReg()) ||
6905        (unsigned)Inst.getOperand(2).getImm() > 255 ||
6906        ((!inITBlock() && Inst.getOperand(5).getReg() != ARM::CPSR) ||
6907        (inITBlock() && Inst.getOperand(5).getReg() != 0)) ||
6908        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6909         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6910      break;
6911    MCInst TmpInst;
6912    TmpInst.setOpcode(Inst.getOpcode() == ARM::t2ADDri ?
6913                      ARM::tADDi8 : ARM::tSUBi8);
6914    TmpInst.addOperand(Inst.getOperand(0));
6915    TmpInst.addOperand(Inst.getOperand(5));
6916    TmpInst.addOperand(Inst.getOperand(0));
6917    TmpInst.addOperand(Inst.getOperand(2));
6918    TmpInst.addOperand(Inst.getOperand(3));
6919    TmpInst.addOperand(Inst.getOperand(4));
6920    Inst = TmpInst;
6921    return true;
6922  }
6923  case ARM::t2ADDrr: {
6924    // If the destination and first source operand are the same, and
6925    // there's no setting of the flags, use encoding T2 instead of T3.
6926    // Note that this is only for ADD, not SUB. This mirrors the system
6927    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
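    // For example, "add r3, r3, r12" becomes the 16-bit tADDhirr encoding.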
6928    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
6929        Inst.getOperand(5).getReg() != 0 ||
6930        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6931         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
6932      break;
6933    MCInst TmpInst;
6934    TmpInst.setOpcode(ARM::tADDhirr);
6935    TmpInst.addOperand(Inst.getOperand(0));
6936    TmpInst.addOperand(Inst.getOperand(0));
6937    TmpInst.addOperand(Inst.getOperand(2));
6938    TmpInst.addOperand(Inst.getOperand(3));
6939    TmpInst.addOperand(Inst.getOperand(4));
6940    Inst = TmpInst;
6941    return true;
6942  }
6943  case ARM::tB:
6944    // A Thumb conditional branch outside of an IT block is a tBcc.
6945    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
6946      Inst.setOpcode(ARM::tBcc);
6947      return true;
6948    }
6949    break;
6950  case ARM::t2B:
6951    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
6952    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
6953      Inst.setOpcode(ARM::t2Bcc);
6954      return true;
6955    }
6956    break;
6957  case ARM::t2Bcc:
6958    // If the conditional is AL or we're in an IT block, we really want t2B.
6959    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
6960      Inst.setOpcode(ARM::t2B);
6961      return true;
6962    }
6963    break;
6964  case ARM::tBcc:
6965    // If the conditional is AL, we really want tB.
6966    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
6967      Inst.setOpcode(ARM::tB);
6968      return true;
6969    }
6970    break;
6971  case ARM::tLDMIA: {
6972    // If the register list contains any high registers, or if the writeback
6973    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
6974    // instead if we're in Thumb2. Otherwise, this should have generated
6975    // an error in validateInstruction().
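    // For example, "ldmia r1, {r2, r3}" (no writeback) and "ldmia r1!, {r2, r8}"
    // (high register) both need the 32-bit form, while "ldmia r1!, {r2, r3}"
    // fits the 16-bit encoding.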
6976    unsigned Rn = Inst.getOperand(0).getReg();
6977    bool hasWritebackToken =
6978      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
6979       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
6980    bool listContainsBase;
6981    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
6982        (!listContainsBase && !hasWritebackToken) ||
6983        (listContainsBase && hasWritebackToken)) {
6984      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
6985      assert (isThumbTwo());
6986      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
6987      // If we're switching to the updating version, we need to insert
6988      // the writeback tied operand.
6989      if (hasWritebackToken)
6990        Inst.insert(Inst.begin(),
6991                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
6992      return true;
6993    }
6994    break;
6995  }
6996  case ARM::tSTMIA_UPD: {
6997    // If the register list contains any high registers, we need to use
6998    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
6999    // should have generated an error in validateInstruction().
7000    unsigned Rn = Inst.getOperand(0).getReg();
7001    bool listContainsBase;
7002    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
7003      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
7004      assert (isThumbTwo());
7005      Inst.setOpcode(ARM::t2STMIA_UPD);
7006      return true;
7007    }
7008    break;
7009  }
7010  case ARM::tPOP: {
7011    bool listContainsBase;
7012    // If the register list contains any high registers, we need to use
7013    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
7014    // should have generated an error in validateInstruction().
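    // For example, "pop {r4, r9}" becomes t2LDMIA_UPD ("ldmia sp!, {r4, r9}"),
    // while "pop {r4, pc}" stays 16-bit.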
7015    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
7016      return false;
7017    assert (isThumbTwo());
7018    Inst.setOpcode(ARM::t2LDMIA_UPD);
7019    // Add the base register and writeback operands.
7020    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7021    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7022    return true;
7023  }
7024  case ARM::tPUSH: {
7025    bool listContainsBase;
7026    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
7027      return false;
7028    assert (isThumbTwo());
7029    Inst.setOpcode(ARM::t2STMDB_UPD);
7030    // Add the base register and writeback operands.
7031    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7032    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
7033    return true;
7034  }
7035  case ARM::t2MOVi: {
7036    // If we can use the 16-bit encoding and the user didn't explicitly
7037    // request the 32-bit variant, transform it here.
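    // For example, "movs r0, #42" outside an IT block (or "mov r0, #42" inside
    // one) becomes the 16-bit tMOVi8 encoding.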
7038    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7039        (unsigned)Inst.getOperand(1).getImm() <= 255 &&
7040        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
7041         Inst.getOperand(4).getReg() == ARM::CPSR) ||
7042        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
7043        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7044         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7045      // The operands aren't in the same order for tMOVi8...
7046      MCInst TmpInst;
7047      TmpInst.setOpcode(ARM::tMOVi8);
7048      TmpInst.addOperand(Inst.getOperand(0));
7049      TmpInst.addOperand(Inst.getOperand(4));
7050      TmpInst.addOperand(Inst.getOperand(1));
7051      TmpInst.addOperand(Inst.getOperand(2));
7052      TmpInst.addOperand(Inst.getOperand(3));
7053      Inst = TmpInst;
7054      return true;
7055    }
7056    break;
7057  }
7058  case ARM::t2MOVr: {
7059    // If we can use the 16-bit encoding and the user didn't explicitly
7060    // request the 32-bit variant, transform it here.
7061    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7062        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7063        Inst.getOperand(2).getImm() == ARMCC::AL &&
7064        Inst.getOperand(4).getReg() == ARM::CPSR &&
7065        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7066         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7067      // The operands aren't the same for tMOV[S]r... (no cc_out)
7068      MCInst TmpInst;
7069      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
7070      TmpInst.addOperand(Inst.getOperand(0));
7071      TmpInst.addOperand(Inst.getOperand(1));
7072      TmpInst.addOperand(Inst.getOperand(2));
7073      TmpInst.addOperand(Inst.getOperand(3));
7074      Inst = TmpInst;
7075      return true;
7076    }
7077    break;
7078  }
7079  case ARM::t2SXTH:
7080  case ARM::t2SXTB:
7081  case ARM::t2UXTH:
7082  case ARM::t2UXTB: {
7083    // If we can use the 16-bit encoding and the user didn't explicitly
7084    // request the 32-bit variant, transform it here.
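    // For example, "uxtb r1, r2" becomes the 16-bit tUXTB, while "uxtb.w r1, r2"
    // keeps the 32-bit encoding.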
7085    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
7086        isARMLowRegister(Inst.getOperand(1).getReg()) &&
7087        Inst.getOperand(2).getImm() == 0 &&
7088        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
7089         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
7090      unsigned NewOpc;
7091      switch (Inst.getOpcode()) {
7092      default: llvm_unreachable("Illegal opcode!");
7093      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
7094      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
7095      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
7096      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
7097      }
7098      // The operands aren't the same for thumb1 (no rotate operand).
7099      MCInst TmpInst;
7100      TmpInst.setOpcode(NewOpc);
7101      TmpInst.addOperand(Inst.getOperand(0));
7102      TmpInst.addOperand(Inst.getOperand(1));
7103      TmpInst.addOperand(Inst.getOperand(3));
7104      TmpInst.addOperand(Inst.getOperand(4));
7105      Inst = TmpInst;
7106      return true;
7107    }
7108    break;
7109  }
7110  case ARM::MOVsi: {
7111    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
7112    if (SOpc == ARM_AM::rrx) return false;
7113    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
7114      // Shifting by zero is accepted as a vanilla 'MOVr'
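      // For example, "mov r1, r2, lsl #0" is emitted as "mov r1, r2".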
7115      MCInst TmpInst;
7116      TmpInst.setOpcode(ARM::MOVr);
7117      TmpInst.addOperand(Inst.getOperand(0));
7118      TmpInst.addOperand(Inst.getOperand(1));
7119      TmpInst.addOperand(Inst.getOperand(3));
7120      TmpInst.addOperand(Inst.getOperand(4));
7121      TmpInst.addOperand(Inst.getOperand(5));
7122      Inst = TmpInst;
7123      return true;
7124    }
7125    return false;
7126  }
7127  case ARM::ANDrsi:
7128  case ARM::ORRrsi:
7129  case ARM::EORrsi:
7130  case ARM::BICrsi:
7131  case ARM::SUBrsi:
7132  case ARM::ADDrsi: {
7133    unsigned newOpc;
7134    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(3).getImm());
7135    if (SOpc == ARM_AM::rrx) return false;
7136    switch (Inst.getOpcode()) {
7137    default: llvm_unreachable("unexpected opcode!");
7138    case ARM::ANDrsi: newOpc = ARM::ANDrr; break;
7139    case ARM::ORRrsi: newOpc = ARM::ORRrr; break;
7140    case ARM::EORrsi: newOpc = ARM::EORrr; break;
7141    case ARM::BICrsi: newOpc = ARM::BICrr; break;
7142    case ARM::SUBrsi: newOpc = ARM::SUBrr; break;
7143    case ARM::ADDrsi: newOpc = ARM::ADDrr; break;
7144    }
7145    // If the shift is by zero, use the non-shifted instruction definition.
7146    if (ARM_AM::getSORegOffset(Inst.getOperand(3).getImm()) == 0) {
7147      MCInst TmpInst;
7148      TmpInst.setOpcode(newOpc);
7149      TmpInst.addOperand(Inst.getOperand(0));
7150      TmpInst.addOperand(Inst.getOperand(1));
7151      TmpInst.addOperand(Inst.getOperand(2));
7152      TmpInst.addOperand(Inst.getOperand(4));
7153      TmpInst.addOperand(Inst.getOperand(5));
7154      TmpInst.addOperand(Inst.getOperand(6));
7155      Inst = TmpInst;
7156      return true;
7157    }
7158    return false;
7159  }
7160  case ARM::ITasm:
7161  case ARM::t2IT: {
7162    // In the encoding, for all but the first condition, a mask bit equal
7163    // to the low bit of the condition code means 't'. The parser always
7164    // uses 1 to mean 't', so XOR-toggle the bits above the terminating 1
7165    // if the low bit of the condition code is zero. The encoding also
7166    // expects the low bit of the condition to appear as bit 4 of the mask
7167    // operand, so mask that in if needed.
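    // For example, "itt eq" is parsed with mask 0b1100 (1 == 't'); EQ has a zero
    // low bit, so the bits above the terminating 1 are toggled, giving an encoded
    // mask of 0b0100. For "itt ne" the mask bits are kept and bit 4 is OR'd in.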
7168    MCOperand &MO = Inst.getOperand(1);
7169    unsigned Mask = MO.getImm();
7170    unsigned OrigMask = Mask;
7171    unsigned TZ = CountTrailingZeros_32(Mask);
7172    if ((Inst.getOperand(0).getImm() & 1) == 0) {
7173      assert(Mask && TZ <= 3 && "illegal IT mask value!");
7174      for (unsigned i = 3; i != TZ; --i)
7175        Mask ^= 1 << i;
7176    } else
7177      Mask |= 0x10;
7178    MO.setImm(Mask);
7179
7180    // Set up the IT block state according to the IT instruction we just
7181    // matched.
7182    assert(!inITBlock() && "nested IT blocks?!");
7183    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
7184    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
7185    ITState.CurPosition = 0;
7186    ITState.FirstCond = true;
7187    break;
7188  }
7189  }
7190  return false;
7191}
7192
7193unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
7194  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
7195  // suffix depending on whether they're in an IT block or not.
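  // For example, outside an IT block the 16-bit add must be written
  // "adds r0, r1, r2" (it sets the flags), while inside an IT block the same
  // encoding is written "add r0, r1, r2".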
7196  unsigned Opc = Inst.getOpcode();
7197  const MCInstrDesc &MCID = getInstDesc(Opc);
7198  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
7199    assert(MCID.hasOptionalDef() &&
7200           "optionally flag setting instruction missing optional def operand");
7201    assert(MCID.NumOperands == Inst.getNumOperands() &&
7202           "operand count mismatch!");
7203    // Find the optional-def operand (cc_out).
7204    unsigned OpNo;
7205    for (OpNo = 0;
7206         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
7207         ++OpNo)
7208      ;
7209    // If we're parsing Thumb1, reject it completely.
7210    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
7211      return Match_MnemonicFail;
7212    // If we're parsing Thumb2, which form is legal depends on whether we're
7213    // in an IT block.
7214    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
7215        !inITBlock())
7216      return Match_RequiresITBlock;
7217    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
7218        inITBlock())
7219      return Match_RequiresNotITBlock;
7220  }
7221  // Some high-register-supporting Thumb1 encodings (e.g. tADDhirr) only allow
7222  // both registers to be from r0-r7 when assembling for Thumb2.
7223  else if (Opc == ARM::tADDhirr && isThumbOne() &&
7224           isARMLowRegister(Inst.getOperand(1).getReg()) &&
7225           isARMLowRegister(Inst.getOperand(2).getReg()))
7226    return Match_RequiresThumb2;
7227  // Others only require ARMv6 or later.
7228  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
7229           isARMLowRegister(Inst.getOperand(0).getReg()) &&
7230           isARMLowRegister(Inst.getOperand(1).getReg()))
7231    return Match_RequiresV6;
7232  return Match_Success;
7233}
7234
7235bool ARMAsmParser::
7236MatchAndEmitInstruction(SMLoc IDLoc,
7237                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
7238                        MCStreamer &Out) {
7239  MCInst Inst;
7240  unsigned ErrorInfo;
7241  unsigned MatchResult;
7242  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
7243  switch (MatchResult) {
7244  default: break;
7245  case Match_Success:
7246    // Context sensitive operand constraints aren't handled by the matcher,
7247    // so check them here.
7248    if (validateInstruction(Inst, Operands)) {
7249      // Still progress the IT block, otherwise one wrong condition causes
7250      // nasty cascading errors.
7251      forwardITPosition();
7252      return true;
7253    }
7254
7255    // Some instructions need post-processing to, for example, tweak which
7256    // encoding is selected. Loop on it while changes happen so the
7257    // individual transformations can chain off each other. E.g.,
7258    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
7259    while (processInstruction(Inst, Operands))
7260      ;
7261
7262    // Only move forward at the very end so that everything in validate
7263    // and process gets a consistent answer about whether we're in an IT
7264    // block.
7265    forwardITPosition();
7266
7267    // ITasm is an ARM mode pseudo-instruction that just sets the IT block and
7268    // doesn't actually encode.
7269    if (Inst.getOpcode() == ARM::ITasm)
7270      return false;
7271
7272    Inst.setLoc(IDLoc);
7273    Out.EmitInstruction(Inst);
7274    return false;
7275  case Match_MissingFeature:
7276    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
7277    return true;
7278  case Match_InvalidOperand: {
7279    SMLoc ErrorLoc = IDLoc;
7280    if (ErrorInfo != ~0U) {
7281      if (ErrorInfo >= Operands.size())
7282        return Error(IDLoc, "too few operands for instruction");
7283
7284      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
7285      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
7286    }
7287
7288    return Error(ErrorLoc, "invalid operand for instruction");
7289  }
7290  case Match_MnemonicFail:
7291    return Error(IDLoc, "invalid instruction");
7292  case Match_ConversionFail:
7293    // The converter function will have already emitted a diagnostic.
7294    return true;
7295  case Match_RequiresNotITBlock:
7296    return Error(IDLoc, "flag setting instruction only valid outside IT block");
7297  case Match_RequiresITBlock:
7298    return Error(IDLoc, "instruction only valid inside IT block");
7299  case Match_RequiresV6:
7300    return Error(IDLoc, "instruction variant requires ARMv6 or later");
7301  case Match_RequiresThumb2:
7302    return Error(IDLoc, "instruction variant requires Thumb2");
7303  }
7304
7305  llvm_unreachable("Implement any new match types added!");
7306}
7307
7308/// parseDirective parses the ARM-specific directives
7309bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
7310  StringRef IDVal = DirectiveID.getIdentifier();
7311  if (IDVal == ".word")
7312    return parseDirectiveWord(4, DirectiveID.getLoc());
7313  else if (IDVal == ".thumb")
7314    return parseDirectiveThumb(DirectiveID.getLoc());
7315  else if (IDVal == ".arm")
7316    return parseDirectiveARM(DirectiveID.getLoc());
7317  else if (IDVal == ".thumb_func")
7318    return parseDirectiveThumbFunc(DirectiveID.getLoc());
7319  else if (IDVal == ".code")
7320    return parseDirectiveCode(DirectiveID.getLoc());
7321  else if (IDVal == ".syntax")
7322    return parseDirectiveSyntax(DirectiveID.getLoc());
7323  else if (IDVal == ".unreq")
7324    return parseDirectiveUnreq(DirectiveID.getLoc());
7325  else if (IDVal == ".arch")
7326    return parseDirectiveArch(DirectiveID.getLoc());
7327  else if (IDVal == ".eabi_attribute")
7328    return parseDirectiveEabiAttr(DirectiveID.getLoc());
7329  return true;
7330}
7331
7332/// parseDirectiveWord
7333///  ::= .word [ expression (, expression)* ]
7334bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
7335  if (getLexer().isNot(AsmToken::EndOfStatement)) {
7336    for (;;) {
7337      const MCExpr *Value;
7338      if (getParser().ParseExpression(Value))
7339        return true;
7340
7341      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
7342
7343      if (getLexer().is(AsmToken::EndOfStatement))
7344        break;
7345
7346      // FIXME: Improve diagnostic.
7347      if (getLexer().isNot(AsmToken::Comma))
7348        return Error(L, "unexpected token in directive");
7349      Parser.Lex();
7350    }
7351  }
7352
7353  Parser.Lex();
7354  return false;
7355}
7356
7357/// parseDirectiveThumb
7358///  ::= .thumb
7359bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
7360  if (getLexer().isNot(AsmToken::EndOfStatement))
7361    return Error(L, "unexpected token in directive");
7362  Parser.Lex();
7363
7364  if (!isThumb())
7365    SwitchMode();
7366  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7367  return false;
7368}
7369
7370/// parseDirectiveARM
7371///  ::= .arm
7372bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
7373  if (getLexer().isNot(AsmToken::EndOfStatement))
7374    return Error(L, "unexpected token in directive");
7375  Parser.Lex();
7376
7377  if (isThumb())
7378    SwitchMode();
7379  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7380  return false;
7381}
7382
7383/// parseDirectiveThumbFunc
7384///  ::= .thumb_func symbol_name
7385bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
7386  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
7387  bool isMachO = MAI.hasSubsectionsViaSymbols();
7388  StringRef Name;
7389  bool needFuncName = true;
7390
7391  // Darwin asm has an (optional) function name after the .thumb_func directive;
7392  // ELF doesn't.
7393  if (isMachO) {
7394    const AsmToken &Tok = Parser.getTok();
7395    if (Tok.isNot(AsmToken::EndOfStatement)) {
7396      if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
7397        return Error(L, "unexpected token in .thumb_func directive");
7398      Name = Tok.getIdentifier();
7399      Parser.Lex(); // Consume the identifier token.
7400      needFuncName = false;
7401    }
7402  }
7403
7404  if (getLexer().isNot(AsmToken::EndOfStatement))
7405    return Error(L, "unexpected token in directive");
7406
7407  // Eat the end of statement and any blank lines that follow.
7408  while (getLexer().is(AsmToken::EndOfStatement))
7409    Parser.Lex();
7410
7411  // FIXME: assuming function name will be the line following .thumb_func
7412  // We really should be checking the next symbol definition even if there's
7413  // stuff in between.
7414  if (needFuncName) {
7415    Name = Parser.getTok().getIdentifier();
7416  }
7417
7418  // Mark symbol as a thumb symbol.
7419  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
7420  getParser().getStreamer().EmitThumbFunc(Func);
7421  return false;
7422}
7423
7424/// parseDirectiveSyntax
7425///  ::= .syntax unified | divided
7426bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
7427  const AsmToken &Tok = Parser.getTok();
7428  if (Tok.isNot(AsmToken::Identifier))
7429    return Error(L, "unexpected token in .syntax directive");
7430  StringRef Mode = Tok.getString();
7431  if (Mode == "unified" || Mode == "UNIFIED")
7432    Parser.Lex();
7433  else if (Mode == "divided" || Mode == "DIVIDED")
7434    return Error(L, "'.syntax divided' arm asssembly not supported");
7435  else
7436    return Error(L, "unrecognized syntax mode in .syntax directive");
7437
7438  if (getLexer().isNot(AsmToken::EndOfStatement))
7439    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7440  Parser.Lex();
7441
7442  // TODO tell the MC streamer the mode
7443  // getParser().getStreamer().Emit???();
7444  return false;
7445}
7446
7447/// parseDirectiveCode
7448///  ::= .code 16 | 32
7449bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
7450  const AsmToken &Tok = Parser.getTok();
7451  if (Tok.isNot(AsmToken::Integer))
7452    return Error(L, "unexpected token in .code directive");
7453  int64_t Val = Parser.getTok().getIntVal();
7454  if (Val == 16)
7455    Parser.Lex();
7456  else if (Val == 32)
7457    Parser.Lex();
7458  else
7459    return Error(L, "invalid operand to .code directive");
7460
7461  if (getLexer().isNot(AsmToken::EndOfStatement))
7462    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
7463  Parser.Lex();
7464
7465  if (Val == 16) {
7466    if (!isThumb())
7467      SwitchMode();
7468    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
7469  } else {
7470    if (isThumb())
7471      SwitchMode();
7472    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
7473  }
7474
7475  return false;
7476}
7477
7478/// parseDirectiveReq
7479///  ::= name .req registername
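///  e.g. "fp .req r11" lets "fp" be used wherever "r11" would be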
7480bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
7481  Parser.Lex(); // Eat the '.req' token.
7482  unsigned Reg;
7483  SMLoc SRegLoc, ERegLoc;
7484  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
7485    Parser.EatToEndOfStatement();
7486    return Error(SRegLoc, "register name expected");
7487  }
7488
7489  // Shouldn't be anything else.
7490  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
7491    Parser.EatToEndOfStatement();
7492    return Error(Parser.getTok().getLoc(),
7493                 "unexpected input in .req directive.");
7494  }
7495
7496  Parser.Lex(); // Consume the EndOfStatement
7497
7498  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
7499    return Error(SRegLoc, "redefinition of '" + Name +
7500                          "' does not match original.");
7501
7502  return false;
7503}
7504
7505/// parseDirectiveUnreq
7506///  ::= .unreq registername
7507bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
7508  if (Parser.getTok().isNot(AsmToken::Identifier)) {
7509    Parser.EatToEndOfStatement();
7510    return Error(L, "unexpected input in .unreq directive.");
7511  }
7512  RegisterReqs.erase(Parser.getTok().getIdentifier());
7513  Parser.Lex(); // Eat the identifier.
7514  return false;
7515}
7516
7517/// parseDirectiveArch
7518///  ::= .arch token
7519bool ARMAsmParser::parseDirectiveArch(SMLoc L) {
7520  return true;
7521}
7522
7523/// parseDirectiveEabiAttr
7524///  ::= .eabi_attribute int, int
7525bool ARMAsmParser::parseDirectiveEabiAttr(SMLoc L) {
7526  return true;
7527}
7528
7529extern "C" void LLVMInitializeARMAsmLexer();
7530
7531/// Force static initialization.
7532extern "C" void LLVMInitializeARMAsmParser() {
7533  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
7534  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
7535  LLVMInitializeARMAsmLexer();
7536}
7537
7538#define GET_REGISTER_MATCHER
7539#define GET_MATCHER_IMPLEMENTATION
7540#include "ARMGenAsmMatcher.inc"
7541