ARMAsmParser.cpp revision 9c39789c361d4fe2632f28fca74c9ea5fff3dafc
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registered via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // first instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
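  // Illustrative note (editor addition, not in the original source): applying
  // the formula documented above, Mask = 0b1000 describes a one-instruction
  // IT block (trailingzeroes = 3) and Mask = 0b0100 a two-instruction block
  // (trailingzeroes = 2); forwardITPosition() below retires the block once
  // CurPosition reaches 5 - trailingzeroes(Mask).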
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104
105  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
106                          bool &CarrySetting, unsigned &ProcessorIMod,
107                          StringRef &ITMask);
108  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
109                             bool &CanAcceptPredicationCode);
110
111  bool isThumb() const {
112    // FIXME: Can tablegen auto-generate this?
113    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
114  }
115  bool isThumbOne() const {
116    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
117  }
118  bool isThumbTwo() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
120  }
121  bool hasV6Ops() const {
122    return STI.getFeatureBits() & ARM::HasV6Ops;
123  }
124  bool hasV7Ops() const {
125    return STI.getFeatureBits() & ARM::HasV7Ops;
126  }
127  void SwitchMode() {
128    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
129    setAvailableFeatures(FB);
130  }
131  bool isMClass() const {
132    return STI.getFeatureBits() & ARM::FeatureMClass;
133  }
134
135  /// @name Auto-generated Match Functions
136  /// {
137
138#define GET_ASSEMBLER_HEADER
139#include "ARMGenAsmMatcher.inc"
140
141  /// }
142
143  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
144  OperandMatchResultTy parseCoprocNumOperand(
145    SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocRegOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocOptionOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseMemBarrierOptOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseProcIFlagsOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseMSRMaskOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
157                                   StringRef Op, int Low, int High);
158  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
159    return parsePKHImm(O, "lsl", 0, 31);
160  }
161  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "asr", 1, 32);
163  }
164  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
166  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
173
174  // Asm Match Converter Methods
175  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
176                    const SmallVectorImpl<MCParsedAsmOperand*> &);
177  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
180                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
194                             const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
202                  const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
206                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
208                        const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
210                     const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
212                        const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
214                     const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
216                        const SmallVectorImpl<MCParsedAsmOperand*> &);
217
218  bool validateInstruction(MCInst &Inst,
219                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
220  bool processInstruction(MCInst &Inst,
221                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool shouldOmitCCOutOperand(StringRef Mnemonic,
223                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
224
225public:
226  enum ARMMatchResultTy {
227    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
228    Match_RequiresNotITBlock,
229    Match_RequiresV6,
230    Match_RequiresThumb2
231  };
232
233  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
234    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
235    MCAsmParserExtension::Initialize(_Parser);
236
237    // Initialize the set of available features.
238    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
239
240    // Not in an ITBlock to start with.
241    ITState.CurPosition = ~0U;
242  }
243
244  // Implementation of the MCTargetAsmParser interface:
245  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
246  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
247                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
248  bool ParseDirective(AsmToken DirectiveID);
249
250  unsigned checkTargetMatchPredicate(MCInst &Inst);
251
252  bool MatchAndEmitInstruction(SMLoc IDLoc,
253                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
254                               MCStreamer &Out);
255};
256} // end anonymous namespace
257
258namespace {
259
260/// ARMOperand - Instances of this class represent a parsed ARM machine
261/// instruction.
262class ARMOperand : public MCParsedAsmOperand {
263  enum KindTy {
264    k_CondCode,
265    k_CCOut,
266    k_ITCondMask,
267    k_CoprocNum,
268    k_CoprocReg,
269    k_CoprocOption,
270    k_Immediate,
271    k_FPImmediate,
272    k_MemBarrierOpt,
273    k_Memory,
274    k_PostIndexRegister,
275    k_MSRMask,
276    k_ProcIFlags,
277    k_VectorIndex,
278    k_Register,
279    k_RegisterList,
280    k_DPRRegisterList,
281    k_SPRRegisterList,
282    k_VectorList,
283    k_VectorListAllLanes,
284    k_VectorListIndexed,
285    k_ShiftedRegister,
286    k_ShiftedImmediate,
287    k_ShifterImmediate,
288    k_RotateImmediate,
289    k_BitfieldDescriptor,
290    k_Token
291  } Kind;
292
293  SMLoc StartLoc, EndLoc;
294  SmallVector<unsigned, 8> Registers;
295
296  union {
297    struct {
298      ARMCC::CondCodes Val;
299    } CC;
300
301    struct {
302      unsigned Val;
303    } Cop;
304
305    struct {
306      unsigned Val;
307    } CoprocOption;
308
309    struct {
310      unsigned Mask:4;
311    } ITMask;
312
313    struct {
314      ARM_MB::MemBOpt Val;
315    } MBOpt;
316
317    struct {
318      ARM_PROC::IFlags Val;
319    } IFlags;
320
321    struct {
322      unsigned Val;
323    } MMask;
324
325    struct {
326      const char *Data;
327      unsigned Length;
328    } Tok;
329
330    struct {
331      unsigned RegNum;
332    } Reg;
333
334    // A vector register list is a sequential list of 1 to 4 registers.
335    struct {
336      unsigned RegNum;
337      unsigned Count;
338      unsigned LaneIndex;
339      bool isDoubleSpaced;
340    } VectorList;
341
342    struct {
343      unsigned Val;
344    } VectorIndex;
345
346    struct {
347      const MCExpr *Val;
348    } Imm;
349
350    struct {
351      unsigned Val;       // encoded 8-bit representation
352    } FPImm;
353
354    /// Combined record for all forms of ARM address expressions.
355    struct {
356      unsigned BaseRegNum;
357      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
358      // was specified.
359      const MCConstantExpr *OffsetImm;  // Offset immediate value
360      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
361      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
362      unsigned ShiftImm;        // shift for OffsetReg.
363      unsigned Alignment;       // 0 = no alignment specified
364                                // n = alignment in bytes (2, 4, 8, 16, or 32)
365      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
366    } Memory;
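    // Illustrative note (editor addition, not in the original source): an
    // operand such as "[r0, r1, lsl #2]" would fill BaseRegNum/OffsetRegNum
    // with r0/r1, set ShiftType = lsl and ShiftImm = 2, and leave OffsetImm
    // null, whereas "[r0, #8]" uses only BaseRegNum and OffsetImm.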
367
368    struct {
369      unsigned RegNum;
370      bool isAdd;
371      ARM_AM::ShiftOpc ShiftTy;
372      unsigned ShiftImm;
373    } PostIdxReg;
374
375    struct {
376      bool isASR;
377      unsigned Imm;
378    } ShifterImm;
379    struct {
380      ARM_AM::ShiftOpc ShiftTy;
381      unsigned SrcReg;
382      unsigned ShiftReg;
383      unsigned ShiftImm;
384    } RegShiftedReg;
385    struct {
386      ARM_AM::ShiftOpc ShiftTy;
387      unsigned SrcReg;
388      unsigned ShiftImm;
389    } RegShiftedImm;
390    struct {
391      unsigned Imm;
392    } RotImm;
393    struct {
394      unsigned LSB;
395      unsigned Width;
396    } Bitfield;
397  };
398
399  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
400public:
401  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
402    Kind = o.Kind;
403    StartLoc = o.StartLoc;
404    EndLoc = o.EndLoc;
405    switch (Kind) {
406    case k_CondCode:
407      CC = o.CC;
408      break;
409    case k_ITCondMask:
410      ITMask = o.ITMask;
411      break;
412    case k_Token:
413      Tok = o.Tok;
414      break;
415    case k_CCOut:
416    case k_Register:
417      Reg = o.Reg;
418      break;
419    case k_RegisterList:
420    case k_DPRRegisterList:
421    case k_SPRRegisterList:
422      Registers = o.Registers;
423      break;
424    case k_VectorList:
425    case k_VectorListAllLanes:
426    case k_VectorListIndexed:
427      VectorList = o.VectorList;
428      break;
429    case k_CoprocNum:
430    case k_CoprocReg:
431      Cop = o.Cop;
432      break;
433    case k_CoprocOption:
434      CoprocOption = o.CoprocOption;
435      break;
436    case k_Immediate:
437      Imm = o.Imm;
438      break;
439    case k_FPImmediate:
440      FPImm = o.FPImm;
441      break;
442    case k_MemBarrierOpt:
443      MBOpt = o.MBOpt;
444      break;
445    case k_Memory:
446      Memory = o.Memory;
447      break;
448    case k_PostIndexRegister:
449      PostIdxReg = o.PostIdxReg;
450      break;
451    case k_MSRMask:
452      MMask = o.MMask;
453      break;
454    case k_ProcIFlags:
455      IFlags = o.IFlags;
456      break;
457    case k_ShifterImmediate:
458      ShifterImm = o.ShifterImm;
459      break;
460    case k_ShiftedRegister:
461      RegShiftedReg = o.RegShiftedReg;
462      break;
463    case k_ShiftedImmediate:
464      RegShiftedImm = o.RegShiftedImm;
465      break;
466    case k_RotateImmediate:
467      RotImm = o.RotImm;
468      break;
469    case k_BitfieldDescriptor:
470      Bitfield = o.Bitfield;
471      break;
472    case k_VectorIndex:
473      VectorIndex = o.VectorIndex;
474      break;
475    }
476  }
477
478  /// getStartLoc - Get the location of the first token of this operand.
479  SMLoc getStartLoc() const { return StartLoc; }
480  /// getEndLoc - Get the location of the last token of this operand.
481  SMLoc getEndLoc() const { return EndLoc; }
482
483  ARMCC::CondCodes getCondCode() const {
484    assert(Kind == k_CondCode && "Invalid access!");
485    return CC.Val;
486  }
487
488  unsigned getCoproc() const {
489    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
490    return Cop.Val;
491  }
492
493  StringRef getToken() const {
494    assert(Kind == k_Token && "Invalid access!");
495    return StringRef(Tok.Data, Tok.Length);
496  }
497
498  unsigned getReg() const {
499    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
500    return Reg.RegNum;
501  }
502
503  const SmallVectorImpl<unsigned> &getRegList() const {
504    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
505            Kind == k_SPRRegisterList) && "Invalid access!");
506    return Registers;
507  }
508
509  const MCExpr *getImm() const {
510    assert(Kind == k_Immediate && "Invalid access!");
511    return Imm.Val;
512  }
513
514  unsigned getFPImm() const {
515    assert(Kind == k_FPImmediate && "Invalid access!");
516    return FPImm.Val;
517  }
518
519  unsigned getVectorIndex() const {
520    assert(Kind == k_VectorIndex && "Invalid access!");
521    return VectorIndex.Val;
522  }
523
524  ARM_MB::MemBOpt getMemBarrierOpt() const {
525    assert(Kind == k_MemBarrierOpt && "Invalid access!");
526    return MBOpt.Val;
527  }
528
529  ARM_PROC::IFlags getProcIFlags() const {
530    assert(Kind == k_ProcIFlags && "Invalid access!");
531    return IFlags.Val;
532  }
533
534  unsigned getMSRMask() const {
535    assert(Kind == k_MSRMask && "Invalid access!");
536    return MMask.Val;
537  }
538
539  bool isCoprocNum() const { return Kind == k_CoprocNum; }
540  bool isCoprocReg() const { return Kind == k_CoprocReg; }
541  bool isCoprocOption() const { return Kind == k_CoprocOption; }
542  bool isCondCode() const { return Kind == k_CondCode; }
543  bool isCCOut() const { return Kind == k_CCOut; }
544  bool isITMask() const { return Kind == k_ITCondMask; }
545  bool isITCondCode() const { return Kind == k_CondCode; }
546  bool isImm() const { return Kind == k_Immediate; }
547  bool isFPImm() const { return Kind == k_FPImmediate; }
548  bool isImm8s4() const {
549    if (Kind != k_Immediate)
550      return false;
551    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
552    if (!CE) return false;
553    int64_t Value = CE->getValue();
554    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
555  }
556  bool isImm0_1020s4() const {
557    if (Kind != k_Immediate)
558      return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
563  }
564  bool isImm0_508s4() const {
565    if (Kind != k_Immediate)
566      return false;
567    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
568    if (!CE) return false;
569    int64_t Value = CE->getValue();
570    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
571  }
572  bool isImm0_255() const {
573    if (Kind != k_Immediate)
574      return false;
575    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
576    if (!CE) return false;
577    int64_t Value = CE->getValue();
578    return Value >= 0 && Value < 256;
579  }
580  bool isImm0_1() const {
581    if (Kind != k_Immediate)
582      return false;
583    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
584    if (!CE) return false;
585    int64_t Value = CE->getValue();
586    return Value >= 0 && Value < 2;
587  }
588  bool isImm0_3() const {
589    if (Kind != k_Immediate)
590      return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return Value >= 0 && Value < 4;
595  }
596  bool isImm0_7() const {
597    if (Kind != k_Immediate)
598      return false;
599    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
600    if (!CE) return false;
601    int64_t Value = CE->getValue();
602    return Value >= 0 && Value < 8;
603  }
604  bool isImm0_15() const {
605    if (Kind != k_Immediate)
606      return false;
607    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
608    if (!CE) return false;
609    int64_t Value = CE->getValue();
610    return Value >= 0 && Value < 16;
611  }
612  bool isImm0_31() const {
613    if (Kind != k_Immediate)
614      return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 32;
619  }
620  bool isImm0_63() const {
621    if (Kind != k_Immediate)
622      return false;
623    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
624    if (!CE) return false;
625    int64_t Value = CE->getValue();
626    return Value >= 0 && Value < 64;
627  }
628  bool isImm8() const {
629    if (Kind != k_Immediate)
630      return false;
631    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
632    if (!CE) return false;
633    int64_t Value = CE->getValue();
634    return Value == 8;
635  }
636  bool isImm16() const {
637    if (Kind != k_Immediate)
638      return false;
639    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
640    if (!CE) return false;
641    int64_t Value = CE->getValue();
642    return Value == 16;
643  }
644  bool isImm32() const {
645    if (Kind != k_Immediate)
646      return false;
647    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
648    if (!CE) return false;
649    int64_t Value = CE->getValue();
650    return Value == 32;
651  }
652  bool isShrImm8() const {
653    if (Kind != k_Immediate)
654      return false;
655    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
656    if (!CE) return false;
657    int64_t Value = CE->getValue();
658    return Value > 0 && Value <= 8;
659  }
660  bool isShrImm16() const {
661    if (Kind != k_Immediate)
662      return false;
663    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
664    if (!CE) return false;
665    int64_t Value = CE->getValue();
666    return Value > 0 && Value <= 16;
667  }
668  bool isShrImm32() const {
669    if (Kind != k_Immediate)
670      return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 32;
675  }
676  bool isShrImm64() const {
677    if (Kind != k_Immediate)
678      return false;
679    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
680    if (!CE) return false;
681    int64_t Value = CE->getValue();
682    return Value > 0 && Value <= 64;
683  }
684  bool isImm1_7() const {
685    if (Kind != k_Immediate)
686      return false;
687    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
688    if (!CE) return false;
689    int64_t Value = CE->getValue();
690    return Value > 0 && Value < 8;
691  }
692  bool isImm1_15() const {
693    if (Kind != k_Immediate)
694      return false;
695    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
696    if (!CE) return false;
697    int64_t Value = CE->getValue();
698    return Value > 0 && Value < 16;
699  }
700  bool isImm1_31() const {
701    if (Kind != k_Immediate)
702      return false;
703    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704    if (!CE) return false;
705    int64_t Value = CE->getValue();
706    return Value > 0 && Value < 32;
707  }
708  bool isImm1_16() const {
709    if (Kind != k_Immediate)
710      return false;
711    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
712    if (!CE) return false;
713    int64_t Value = CE->getValue();
714    return Value > 0 && Value < 17;
715  }
716  bool isImm1_32() const {
717    if (Kind != k_Immediate)
718      return false;
719    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
720    if (!CE) return false;
721    int64_t Value = CE->getValue();
722    return Value > 0 && Value < 33;
723  }
724  bool isImm0_32() const {
725    if (Kind != k_Immediate)
726      return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    if (!CE) return false;
729    int64_t Value = CE->getValue();
730    return Value >= 0 && Value < 33;
731  }
732  bool isImm0_65535() const {
733    if (Kind != k_Immediate)
734      return false;
735    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
736    if (!CE) return false;
737    int64_t Value = CE->getValue();
738    return Value >= 0 && Value < 65536;
739  }
740  bool isImm0_65535Expr() const {
741    if (Kind != k_Immediate)
742      return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    // If it's not a constant expression, it'll generate a fixup and be
745    // handled later.
746    if (!CE) return true;
747    int64_t Value = CE->getValue();
748    return Value >= 0 && Value < 65536;
749  }
750  bool isImm24bit() const {
751    if (Kind != k_Immediate)
752      return false;
753    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
754    if (!CE) return false;
755    int64_t Value = CE->getValue();
756    return Value >= 0 && Value <= 0xffffff;
757  }
758  bool isImmThumbSR() const {
759    if (Kind != k_Immediate)
760      return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    if (!CE) return false;
763    int64_t Value = CE->getValue();
764    return Value > 0 && Value < 33;
765  }
766  bool isPKHLSLImm() const {
767    if (Kind != k_Immediate)
768      return false;
769    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
770    if (!CE) return false;
771    int64_t Value = CE->getValue();
772    return Value >= 0 && Value < 32;
773  }
774  bool isPKHASRImm() const {
775    if (Kind != k_Immediate)
776      return false;
777    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778    if (!CE) return false;
779    int64_t Value = CE->getValue();
780    return Value > 0 && Value <= 32;
781  }
782  bool isARMSOImm() const {
783    if (Kind != k_Immediate)
784      return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getSOImmVal(Value) != -1;
789  }
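  // Illustrative note (editor addition, not in the original source): an ARM
  // so_imm is an 8-bit value rotated right by an even amount, so e.g. 0xab00
  // (0xab rotated right by 24) satisfies isARMSOImm(), while 0x101 does not.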
790  bool isARMSOImmNot() const {
791    if (Kind != k_Immediate)
792      return false;
793    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
794    if (!CE) return false;
795    int64_t Value = CE->getValue();
796    return ARM_AM::getSOImmVal(~Value) != -1;
797  }
798  bool isARMSOImmNeg() const {
799    if (Kind != k_Immediate)
800      return false;
801    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
802    if (!CE) return false;
803    int64_t Value = CE->getValue();
804    return ARM_AM::getSOImmVal(-Value) != -1;
805  }
806  bool isT2SOImm() const {
807    if (Kind != k_Immediate)
808      return false;
809    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
810    if (!CE) return false;
811    int64_t Value = CE->getValue();
812    return ARM_AM::getT2SOImmVal(Value) != -1;
813  }
814  bool isT2SOImmNot() const {
815    if (Kind != k_Immediate)
816      return false;
817    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
818    if (!CE) return false;
819    int64_t Value = CE->getValue();
820    return ARM_AM::getT2SOImmVal(~Value) != -1;
821  }
822  bool isT2SOImmNeg() const {
823    if (Kind != k_Immediate)
824      return false;
825    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
826    if (!CE) return false;
827    int64_t Value = CE->getValue();
828    return ARM_AM::getT2SOImmVal(-Value) != -1;
829  }
830  bool isSetEndImm() const {
831    if (Kind != k_Immediate)
832      return false;
833    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
834    if (!CE) return false;
835    int64_t Value = CE->getValue();
836    return Value == 1 || Value == 0;
837  }
838  bool isReg() const { return Kind == k_Register; }
839  bool isRegList() const { return Kind == k_RegisterList; }
840  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
841  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
842  bool isToken() const { return Kind == k_Token; }
843  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
844  bool isMemory() const { return Kind == k_Memory; }
845  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
846  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
847  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
848  bool isRotImm() const { return Kind == k_RotateImmediate; }
849  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
850  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
851  bool isPostIdxReg() const {
852    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
853  }
854  bool isMemNoOffset(bool alignOK = false) const {
855    if (!isMemory())
856      return false;
857    // No offset of any kind.
858    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
859     (alignOK || Memory.Alignment == 0);
860  }
861  bool isAlignedMemory() const {
862    return isMemNoOffset(true);
863  }
864  bool isAddrMode2() const {
865    if (!isMemory() || Memory.Alignment != 0) return false;
866    // Check for register offset.
867    if (Memory.OffsetRegNum) return true;
868    // Immediate offset in range [-4095, 4095].
869    if (!Memory.OffsetImm) return true;
870    int64_t Val = Memory.OffsetImm->getValue();
871    return Val > -4096 && Val < 4096;
872  }
873  bool isAM2OffsetImm() const {
874    if (Kind != k_Immediate)
875      return false;
876    // Immediate offset in range [-4095, 4095].
877    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
878    if (!CE) return false;
879    int64_t Val = CE->getValue();
880    return Val > -4096 && Val < 4096;
881  }
882  bool isAddrMode3() const {
883    if (!isMemory() || Memory.Alignment != 0) return false;
884    // No shifts are legal for AM3.
885    if (Memory.ShiftType != ARM_AM::no_shift) return false;
886    // Check for register offset.
887    if (Memory.OffsetRegNum) return true;
888    // Immediate offset in range [-255, 255].
889    if (!Memory.OffsetImm) return true;
890    int64_t Val = Memory.OffsetImm->getValue();
891    return Val > -256 && Val < 256;
892  }
893  bool isAM3Offset() const {
894    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
895      return false;
896    if (Kind == k_PostIndexRegister)
897      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
898    // Immediate offset in range [-255, 255].
899    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
900    if (!CE) return false;
901    int64_t Val = CE->getValue();
902    // Special case, #-0 is INT32_MIN.
903    return (Val > -256 && Val < 256) || Val == INT32_MIN;
904  }
905  bool isAddrMode5() const {
906    // If we have an immediate that's not a constant, treat it as a label
907    // reference needing a fixup. If it is a constant, it's something else
908    // and we reject it.
909    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
910      return true;
911    if (!isMemory() || Memory.Alignment != 0) return false;
912    // Check for register offset.
913    if (Memory.OffsetRegNum) return false;
914    // Immediate offset in range [-1020, 1020] and a multiple of 4.
915    if (!Memory.OffsetImm) return true;
916    int64_t Val = Memory.OffsetImm->getValue();
917    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
918      Val == INT32_MIN;
919  }
920  bool isMemTBB() const {
921    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
922        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
923      return false;
924    return true;
925  }
926  bool isMemTBH() const {
927    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
928        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
929        Memory.Alignment != 0)
930      return false;
931    return true;
932  }
933  bool isMemRegOffset() const {
934    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
935      return false;
936    return true;
937  }
938  bool isT2MemRegOffset() const {
939    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
940        Memory.Alignment != 0)
941      return false;
942    // Only lsl #{0, 1, 2, 3} allowed.
943    if (Memory.ShiftType == ARM_AM::no_shift)
944      return true;
945    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
946      return false;
947    return true;
948  }
949  bool isMemThumbRR() const {
950    // Thumb reg+reg addressing is simple. Just two registers, a base and
951    // an offset. No shifts, negations or any other complicating factors.
952    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
953        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
954      return false;
955    return isARMLowRegister(Memory.BaseRegNum) &&
956      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
957  }
958  bool isMemThumbRIs4() const {
959    if (!isMemory() || Memory.OffsetRegNum != 0 ||
960        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
961      return false;
962    // Immediate offset, multiple of 4 in range [0, 124].
963    if (!Memory.OffsetImm) return true;
964    int64_t Val = Memory.OffsetImm->getValue();
965    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
966  }
967  bool isMemThumbRIs2() const {
968    if (!isMemory() || Memory.OffsetRegNum != 0 ||
969        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
970      return false;
971    // Immediate offset, multiple of 2 in range [0, 62].
972    if (!Memory.OffsetImm) return true;
973    int64_t Val = Memory.OffsetImm->getValue();
974    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
975  }
976  bool isMemThumbRIs1() const {
977    if (!isMemory() || Memory.OffsetRegNum != 0 ||
978        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
979      return false;
980    // Immediate offset in range [0, 31].
981    if (!Memory.OffsetImm) return true;
982    int64_t Val = Memory.OffsetImm->getValue();
983    return Val >= 0 && Val <= 31;
984  }
985  bool isMemThumbSPI() const {
986    if (!isMemory() || Memory.OffsetRegNum != 0 ||
987        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
988      return false;
989    // Immediate offset, multiple of 4 in range [0, 1020].
990    if (!Memory.OffsetImm) return true;
991    int64_t Val = Memory.OffsetImm->getValue();
992    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
993  }
994  bool isMemImm8s4Offset() const {
995    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
996      return false;
997    // Immediate offset a multiple of 4 in range [-1020, 1020].
998    if (!Memory.OffsetImm) return true;
999    int64_t Val = Memory.OffsetImm->getValue();
1000    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1001  }
1002  bool isMemImm0_1020s4Offset() const {
1003    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1004      return false;
1005    // Immediate offset a multiple of 4 in range [0, 1020].
1006    if (!Memory.OffsetImm) return true;
1007    int64_t Val = Memory.OffsetImm->getValue();
1008    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1009  }
1010  bool isMemImm8Offset() const {
1011    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1012      return false;
1013    // Immediate offset in range [-255, 255].
1014    if (!Memory.OffsetImm) return true;
1015    int64_t Val = Memory.OffsetImm->getValue();
1016    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1017  }
1018  bool isMemPosImm8Offset() const {
1019    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1020      return false;
1021    // Immediate offset in range [0, 255].
1022    if (!Memory.OffsetImm) return true;
1023    int64_t Val = Memory.OffsetImm->getValue();
1024    return Val >= 0 && Val < 256;
1025  }
1026  bool isMemNegImm8Offset() const {
1027    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1028      return false;
1029    // Immediate offset in range [-255, -1].
1030    if (!Memory.OffsetImm) return false;
1031    int64_t Val = Memory.OffsetImm->getValue();
1032    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1033  }
1034  bool isMemUImm12Offset() const {
1035    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1036      return false;
1037    // Immediate offset in range [0, 4095].
1038    if (!Memory.OffsetImm) return true;
1039    int64_t Val = Memory.OffsetImm->getValue();
1040    return (Val >= 0 && Val < 4096);
1041  }
1042  bool isMemImm12Offset() const {
1043    // If we have an immediate that's not a constant, treat it as a label
1044    // reference needing a fixup. If it is a constant, it's something else
1045    // and we reject it.
1046    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1047      return true;
1048
1049    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1050      return false;
1051    // Immediate offset in range [-4095, 4095].
1052    if (!Memory.OffsetImm) return true;
1053    int64_t Val = Memory.OffsetImm->getValue();
1054    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1055  }
1056  bool isPostIdxImm8() const {
1057    if (Kind != k_Immediate)
1058      return false;
1059    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1060    if (!CE) return false;
1061    int64_t Val = CE->getValue();
1062    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1063  }
1064  bool isPostIdxImm8s4() const {
1065    if (Kind != k_Immediate)
1066      return false;
1067    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1068    if (!CE) return false;
1069    int64_t Val = CE->getValue();
1070    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1071      (Val == INT32_MIN);
1072  }
1073
1074  bool isMSRMask() const { return Kind == k_MSRMask; }
1075  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1076
1077  // NEON operands.
1078  bool isSingleSpacedVectorList() const {
1079    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1080  }
1081  bool isDoubleSpacedVectorList() const {
1082    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1083  }
1084  bool isVecListOneD() const {
1085    if (!isSingleSpacedVectorList()) return false;
1086    return VectorList.Count == 1;
1087  }
1088
1089  bool isVecListTwoD() const {
1090    if (!isSingleSpacedVectorList()) return false;
1091    return VectorList.Count == 2;
1092  }
1093
1094  bool isVecListThreeD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 3;
1097  }
1098
1099  bool isVecListFourD() const {
1100    if (!isSingleSpacedVectorList()) return false;
1101    return VectorList.Count == 4;
1102  }
1103
1104  bool isVecListTwoQ() const {
1105    if (!isDoubleSpacedVectorList()) return false;
1106    return VectorList.Count == 2;
1107  }
1108
1109  bool isVecListOneDAllLanes() const {
1110    if (Kind != k_VectorListAllLanes) return false;
1111    return VectorList.Count == 1;
1112  }
1113
1114  bool isVecListTwoDAllLanes() const {
1115    if (Kind != k_VectorListAllLanes) return false;
1116    return VectorList.Count == 2;
1117  }
1118
1119  bool isVecListOneDByteIndexed() const {
1120    if (Kind != k_VectorListIndexed) return false;
1121    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1122  }
1123
1124  bool isVecListOneDHWordIndexed() const {
1125    if (Kind != k_VectorListIndexed) return false;
1126    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1127  }
1128
1129  bool isVecListOneDWordIndexed() const {
1130    if (Kind != k_VectorListIndexed) return false;
1131    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1132  }
1133
1134  bool isVecListTwoDByteIndexed() const {
1135    if (Kind != k_VectorListIndexed) return false;
1136    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1137  }
1138
1139  bool isVecListTwoDHWordIndexed() const {
1140    if (Kind != k_VectorListIndexed) return false;
1141    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1142  }
1143
1144  bool isVecListTwoDWordIndexed() const {
1145    if (Kind != k_VectorListIndexed) return false;
1146    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1147  }
1148
1149  bool isVectorIndex8() const {
1150    if (Kind != k_VectorIndex) return false;
1151    return VectorIndex.Val < 8;
1152  }
1153  bool isVectorIndex16() const {
1154    if (Kind != k_VectorIndex) return false;
1155    return VectorIndex.Val < 4;
1156  }
1157  bool isVectorIndex32() const {
1158    if (Kind != k_VectorIndex) return false;
1159    return VectorIndex.Val < 2;
1160  }
1161
1162  bool isNEONi8splat() const {
1163    if (Kind != k_Immediate)
1164      return false;
1165    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1166    // Must be a constant.
1167    if (!CE) return false;
1168    int64_t Value = CE->getValue();
1169    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1170    // value.
1171    return Value >= 0 && Value < 256;
1172  }
1173
1174  bool isNEONi16splat() const {
1175    if (Kind != k_Immediate)
1176      return false;
1177    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1178    // Must be a constant.
1179    if (!CE) return false;
1180    int64_t Value = CE->getValue();
1181    // i16 value in the range [0,255] or [0x0100, 0xff00]
1182    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1183  }
1184
1185  bool isNEONi32splat() const {
1186    if (Kind != k_Immediate)
1187      return false;
1188    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1189    // Must be a constant.
1190    if (!CE) return false;
1191    int64_t Value = CE->getValue();
1192    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1193    return (Value >= 0 && Value < 256) ||
1194      (Value >= 0x0100 && Value <= 0xff00) ||
1195      (Value >= 0x010000 && Value <= 0xff0000) ||
1196      (Value >= 0x01000000 && Value <= 0xff000000);
1197  }
1198
1199  bool isNEONi32vmov() const {
1200    if (Kind != k_Immediate)
1201      return false;
1202    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1203    // Must be a constant.
1204    if (!CE) return false;
1205    int64_t Value = CE->getValue();
1206    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1207    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1208    return (Value >= 0 && Value < 256) ||
1209      (Value >= 0x0100 && Value <= 0xff00) ||
1210      (Value >= 0x010000 && Value <= 0xff0000) ||
1211      (Value >= 0x01000000 && Value <= 0xff000000) ||
1212      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1213      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1214  }
1215
1216  bool isNEONi64splat() const {
1217    if (Kind != k_Immediate)
1218      return false;
1219    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1220    // Must be a constant.
1221    if (!CE) return false;
1222    uint64_t Value = CE->getValue();
1223    // i64 value with each byte being either 0 or 0xff.
1224    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1225      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1226    return true;
1227  }
1228
1229  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1230    // Add as immediates when possible.  Null MCExpr = 0.
1231    if (Expr == 0)
1232      Inst.addOperand(MCOperand::CreateImm(0));
1233    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1234      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1235    else
1236      Inst.addOperand(MCOperand::CreateExpr(Expr));
1237  }
1238
1239  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1240    assert(N == 2 && "Invalid number of operands!");
1241    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1242    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1243    Inst.addOperand(MCOperand::CreateReg(RegNum));
1244  }
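  // Illustrative note (editor addition, not in the original source): a parsed
  // "eq" predicate becomes the operand pair (ARMCC::EQ, CPSR), while the
  // always-execute "al" predicate pairs with register 0 instead of CPSR.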
1245
1246  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1247    assert(N == 1 && "Invalid number of operands!");
1248    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1249  }
1250
1251  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1252    assert(N == 1 && "Invalid number of operands!");
1253    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1254  }
1255
1256  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1257    assert(N == 1 && "Invalid number of operands!");
1258    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1259  }
1260
1261  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1262    assert(N == 1 && "Invalid number of operands!");
1263    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1264  }
1265
1266  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1267    assert(N == 1 && "Invalid number of operands!");
1268    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1269  }
1270
1271  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1272    assert(N == 1 && "Invalid number of operands!");
1273    Inst.addOperand(MCOperand::CreateReg(getReg()));
1274  }
1275
1276  void addRegOperands(MCInst &Inst, unsigned N) const {
1277    assert(N == 1 && "Invalid number of operands!");
1278    Inst.addOperand(MCOperand::CreateReg(getReg()));
1279  }
1280
1281  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1282    assert(N == 3 && "Invalid number of operands!");
1283    assert(isRegShiftedReg() &&
1284           "addRegShiftedRegOperands() on non RegShiftedReg!");
1285    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1286    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1287    Inst.addOperand(MCOperand::CreateImm(
1288      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1289  }
1290
1291  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1292    assert(N == 2 && "Invalid number of operands!");
1293    assert(isRegShiftedImm() &&
1294           "addRegShiftedImmOperands() on non RegShiftedImm!");
1295    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1296    Inst.addOperand(MCOperand::CreateImm(
1297      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1298  }
1299
1300  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1301    assert(N == 1 && "Invalid number of operands!");
1302    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1303                                         ShifterImm.Imm));
1304  }
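  // Illustrative note (editor addition, not in the original source): the
  // packed value is (isASR << 5) | Imm, so "asr #16" is added as
  // (1 << 5) | 16 = 0x30 and "lsl #4" as 0x04.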
1305
1306  void addRegListOperands(MCInst &Inst, unsigned N) const {
1307    assert(N == 1 && "Invalid number of operands!");
1308    const SmallVectorImpl<unsigned> &RegList = getRegList();
1309    for (SmallVectorImpl<unsigned>::const_iterator
1310           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1311      Inst.addOperand(MCOperand::CreateReg(*I));
1312  }
1313
1314  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1315    addRegListOperands(Inst, N);
1316  }
1317
1318  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1319    addRegListOperands(Inst, N);
1320  }
1321
1322  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1323    assert(N == 1 && "Invalid number of operands!");
1324    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1325    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1326  }
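  // Illustrative note (editor addition, not in the original source): rotate
  // amounts are multiples of 8, so "ror #16" is stored here as 16 >> 3 = 2.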
1327
1328  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1329    assert(N == 1 && "Invalid number of operands!");
1330    // Munge the lsb/width into a bitfield mask.
1331    unsigned lsb = Bitfield.LSB;
1332    unsigned width = Bitfield.Width;
1333    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1334    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1335                      (32 - (lsb + width)));
1336    Inst.addOperand(MCOperand::CreateImm(Mask));
1337  }
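  // Illustrative note (editor addition, not in the original source): for
  // lsb = 8 and width = 8 the expression above produces 0xffff00ff, i.e. a
  // mask with bits [15:8] clear and every other bit set.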
1338
1339  void addImmOperands(MCInst &Inst, unsigned N) const {
1340    assert(N == 1 && "Invalid number of operands!");
1341    addExpr(Inst, getImm());
1342  }
1343
1344  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1345    assert(N == 1 && "Invalid number of operands!");
1346    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1347  }
1348
1349  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1350    assert(N == 1 && "Invalid number of operands!");
1351    // FIXME: We really want to scale the value here, but the LDRD/STRD
1352    // instructions don't encode operands that way yet.
1353    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1354    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1355  }
1356
1357  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1358    assert(N == 1 && "Invalid number of operands!");
1359    // The immediate is scaled by four in the encoding and is stored
1360    // in the MCInst as such. Lop off the low two bits here.
1361    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1362    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1363  }
1364
1365  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1366    assert(N == 1 && "Invalid number of operands!");
1367    // The immediate is scaled by four in the encoding and is stored
1368    // in the MCInst as such. Lop off the low two bits here.
1369    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1370    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1371  }
1372
1373  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1374    assert(N == 1 && "Invalid number of operands!");
1375    // The constant encodes as the immediate-1, and we store in the instruction
1376    // the bits as encoded, so subtract off one here.
1377    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1378    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1379  }
1380
1381  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1382    assert(N == 1 && "Invalid number of operands!");
1383    // The constant encodes as the immediate-1, and we store in the instruction
1384    // the bits as encoded, so subtract off one here.
1385    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1386    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1387  }
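  // Illustrative note (editor addition, not in the original source): both of
  // the imm1_N forms above store the value minus one, so "#16" for an
  // imm1_16 operand is emitted as 15 and "#1" as 0.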
1388
1389  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1390    assert(N == 1 && "Invalid number of operands!");
1391    // The constant encodes as the immediate, except for 32, which encodes as
1392    // zero.
1393    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1394    unsigned Imm = CE->getValue();
1395    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1396  }
1397
1398  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1399    assert(N == 1 && "Invalid number of operands!");
1400    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1401    // the instruction as well.
1402    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1403    int Val = CE->getValue();
1404    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1405  }
1406
1407  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1408    assert(N == 1 && "Invalid number of operands!");
1409    // The operand is actually a t2_so_imm, but we have its bitwise
1410    // negation in the assembly source, so twiddle it here.
1411    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1412    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1413  }
1414
1415  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1416    assert(N == 1 && "Invalid number of operands!");
1417    // The operand is actually a t2_so_imm, but we have its
1418    // negation in the assembly source, so twiddle it here.
1419    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1420    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1421  }
1422
1423  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1424    assert(N == 1 && "Invalid number of operands!");
1425    // The operand is actually a so_imm, but we have its bitwise
1426    // negation in the assembly source, so twiddle it here.
1427    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1428    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1429  }
1430
1431  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1432    assert(N == 1 && "Invalid number of operands!");
1433    // The operand is actually a so_imm, but we have its
1434    // negation in the assembly source, so twiddle it here.
1435    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1436    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1437  }
1438
1439  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1440    assert(N == 1 && "Invalid number of operands!");
1441    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1442  }
1443
1444  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1445    assert(N == 1 && "Invalid number of operands!");
1446    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1447  }
1448
1449  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1450    assert(N == 2 && "Invalid number of operands!");
1451    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1452    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1453  }
1454
1455  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1456    assert(N == 3 && "Invalid number of operands!");
1457    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1458    if (!Memory.OffsetRegNum) {
1459      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1460      // Special case for #-0
1461      if (Val == INT32_MIN) Val = 0;
1462      if (Val < 0) Val = -Val;
1463      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1464    } else {
1465      // For register offset, we encode the shift type and negation flag
1466      // here.
1467      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1468                              Memory.ShiftImm, Memory.ShiftType);
1469    }
1470    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1471    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1472    Inst.addOperand(MCOperand::CreateImm(Val));
1473  }
1474
1475  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1476    assert(N == 2 && "Invalid number of operands!");
1477    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1478    assert(CE && "non-constant AM2OffsetImm operand!");
1479    int32_t Val = CE->getValue();
1480    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1481    // Special case for #-0
1482    if (Val == INT32_MIN) Val = 0;
1483    if (Val < 0) Val = -Val;
1484    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1485    Inst.addOperand(MCOperand::CreateReg(0));
1486    Inst.addOperand(MCOperand::CreateImm(Val));
1487  }
1488
1489  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1490    assert(N == 3 && "Invalid number of operands!");
1491    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1492    if (!Memory.OffsetRegNum) {
1493      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1494      // Special case for #-0
1495      if (Val == INT32_MIN) Val = 0;
1496      if (Val < 0) Val = -Val;
1497      Val = ARM_AM::getAM3Opc(AddSub, Val);
1498    } else {
1499      // For register offset, we encode the shift type and negation flag
1500      // here.
1501      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1502    }
1503    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1504    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1505    Inst.addOperand(MCOperand::CreateImm(Val));
1506  }
1507
1508  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1509    assert(N == 2 && "Invalid number of operands!");
1510    if (Kind == k_PostIndexRegister) {
1511      int32_t Val =
1512        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1513      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1514      Inst.addOperand(MCOperand::CreateImm(Val));
1515      return;
1516    }
1517
1518    // Constant offset.
1519    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1520    int32_t Val = CE->getValue();
1521    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1522    // Special case for #-0
1523    if (Val == INT32_MIN) Val = 0;
1524    if (Val < 0) Val = -Val;
1525    Val = ARM_AM::getAM3Opc(AddSub, Val);
1526    Inst.addOperand(MCOperand::CreateReg(0));
1527    Inst.addOperand(MCOperand::CreateImm(Val));
1528  }
1529
1530  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1531    assert(N == 2 && "Invalid number of operands!");
1532    // If we have an immediate that's not a constant, treat it as a label
1533    // reference needing a fixup. If it is a constant, it's something else
1534    // and we reject it.
1535    if (isImm()) {
1536      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1537      Inst.addOperand(MCOperand::CreateImm(0));
1538      return;
1539    }
1540
1541    // The lower two bits are always zero and as such are not encoded.
1542    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1543    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1544    // Special case for #-0
1545    if (Val == INT32_MIN) Val = 0;
1546    if (Val < 0) Val = -Val;
1547    Val = ARM_AM::getAM5Opc(AddSub, Val);
1548    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1549    Inst.addOperand(MCOperand::CreateImm(Val));
1550  }
1551
1552  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1553    assert(N == 2 && "Invalid number of operands!");
1554    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1555    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1556    Inst.addOperand(MCOperand::CreateImm(Val));
1557  }
1558
1559  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1560    assert(N == 2 && "Invalid number of operands!");
1561    // The lower two bits are always zero and as such are not encoded.
1562    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1563    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1564    Inst.addOperand(MCOperand::CreateImm(Val));
1565  }
1566
1567  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1568    assert(N == 2 && "Invalid number of operands!");
1569    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1570    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1571    Inst.addOperand(MCOperand::CreateImm(Val));
1572  }
1573
1574  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1575    addMemImm8OffsetOperands(Inst, N);
1576  }
1577
1578  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1579    addMemImm8OffsetOperands(Inst, N);
1580  }
1581
1582  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1583    assert(N == 2 && "Invalid number of operands!");
1584    // If this is an immediate, it's a label reference.
1585    if (Kind == k_Immediate) {
1586      addExpr(Inst, getImm());
1587      Inst.addOperand(MCOperand::CreateImm(0));
1588      return;
1589    }
1590
1591    // Otherwise, it's a normal memory reg+offset.
1592    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1593    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1594    Inst.addOperand(MCOperand::CreateImm(Val));
1595  }
1596
1597  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1598    assert(N == 2 && "Invalid number of operands!");
1599    // If this is an immediate, it's a label reference.
1600    if (Kind == k_Immediate) {
1601      addExpr(Inst, getImm());
1602      Inst.addOperand(MCOperand::CreateImm(0));
1603      return;
1604    }
1605
1606    // Otherwise, it's a normal memory reg+offset.
1607    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1608    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1609    Inst.addOperand(MCOperand::CreateImm(Val));
1610  }
1611
1612  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1613    assert(N == 2 && "Invalid number of operands!");
1614    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1615    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1616  }
1617
1618  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1619    assert(N == 2 && "Invalid number of operands!");
1620    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1621    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1622  }
1623
1624  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1625    assert(N == 3 && "Invalid number of operands!");
1626    unsigned Val =
1627      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1628                        Memory.ShiftImm, Memory.ShiftType);
1629    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1630    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1631    Inst.addOperand(MCOperand::CreateImm(Val));
1632  }
1633
1634  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1635    assert(N == 3 && "Invalid number of operands!");
1636    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1637    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1638    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1639  }
1640
1641  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1642    assert(N == 2 && "Invalid number of operands!");
1643    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1644    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1645  }
1646
1647  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1648    assert(N == 2 && "Invalid number of operands!");
1649    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1650    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1651    Inst.addOperand(MCOperand::CreateImm(Val));
1652  }
1653
1654  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1655    assert(N == 2 && "Invalid number of operands!");
1656    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1657    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1658    Inst.addOperand(MCOperand::CreateImm(Val));
1659  }
1660
1661  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1662    assert(N == 2 && "Invalid number of operands!");
1663    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1664    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1665    Inst.addOperand(MCOperand::CreateImm(Val));
1666  }
1667
1668  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1669    assert(N == 2 && "Invalid number of operands!");
1670    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1671    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1672    Inst.addOperand(MCOperand::CreateImm(Val));
1673  }
1674
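  // The post-indexed immediate helpers below encode the offset magnitude in
  // the low bits and the add/subtract direction as a flag in bit 8 (set for
  // add); the s4 variant additionally scales the immediate down by 4.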
1675  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1676    assert(N == 1 && "Invalid number of operands!");
1677    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1678    assert(CE && "non-constant post-idx-imm8 operand!");
1679    int Imm = CE->getValue();
1680    bool isAdd = Imm >= 0;
1681    if (Imm == INT32_MIN) Imm = 0;
1682    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1683    Inst.addOperand(MCOperand::CreateImm(Imm));
1684  }
1685
1686  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1687    assert(N == 1 && "Invalid number of operands!");
1688    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1689    assert(CE && "non-constant post-idx-imm8s4 operand!");
1690    int Imm = CE->getValue();
1691    bool isAdd = Imm >= 0;
1692    if (Imm == INT32_MIN) Imm = 0;
1693    // Immediate is scaled by 4.
1694    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1695    Inst.addOperand(MCOperand::CreateImm(Imm));
1696  }
1697
1698  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1699    assert(N == 2 && "Invalid number of operands!");
1700    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1701    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1702  }
1703
1704  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1705    assert(N == 2 && "Invalid number of operands!");
1706    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1707    // The sign, shift type, and shift amount are encoded in a single operand
1708    // using the AM2 encoding helpers.
1709    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1710    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1711                                     PostIdxReg.ShiftTy);
1712    Inst.addOperand(MCOperand::CreateImm(Imm));
1713  }
1714
1715  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1716    assert(N == 1 && "Invalid number of operands!");
1717    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1718  }
1719
1720  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1721    assert(N == 1 && "Invalid number of operands!");
1722    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1723  }
1724
1725  void addVecListOperands(MCInst &Inst, unsigned N) const {
1726    assert(N == 1 && "Invalid number of operands!");
1727    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1728  }
1729
1730  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1731    assert(N == 2 && "Invalid number of operands!");
1732    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1733    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1734  }
1735
1736  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1737    assert(N == 1 && "Invalid number of operands!");
1738    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1739  }
1740
1741  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1742    assert(N == 1 && "Invalid number of operands!");
1743    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1744  }
1745
1746  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1747    assert(N == 1 && "Invalid number of operands!");
1748    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1749  }
1750
1751  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1752    assert(N == 1 && "Invalid number of operands!");
1753    // The immediate encodes the type of constant as well as the value.
1754    // Mask in that this is an i8 splat.
1755    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1756    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1757  }
1758
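  // For the i16 splat form below, a value such as 0x1200 becomes
  // (0x1200 >> 8) | 0xa00 = 0xa12, while a value that fits in the low byte,
  // such as 0x37, becomes 0x37 | 0x800 = 0x837.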
1759  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1760    assert(N == 1 && "Invalid number of operands!");
1761    // The immediate encodes the type of constant as well as the value.
1762    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1763    unsigned Value = CE->getValue();
1764    if (Value >= 256)
1765      Value = (Value >> 8) | 0xa00;
1766    else
1767      Value |= 0x800;
1768    Inst.addOperand(MCOperand::CreateImm(Value));
1769  }
1770
1771  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1772    assert(N == 1 && "Invalid number of operands!");
1773    // The immediate encodes the type of constant as well as the value.
1774    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1775    unsigned Value = CE->getValue();
1776    if (Value >= 256 && Value <= 0xff00)
1777      Value = (Value >> 8) | 0x200;
1778    else if (Value > 0xffff && Value <= 0xff0000)
1779      Value = (Value >> 16) | 0x400;
1780    else if (Value > 0xffffff)
1781      Value = (Value >> 24) | 0x600;
1782    Inst.addOperand(MCOperand::CreateImm(Value));
1783  }
1784
1785  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1786    assert(N == 1 && "Invalid number of operands!");
1787    // The immediate encodes the type of constant as well as the value.
1788    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1789    unsigned Value = CE->getValue();
1790    if (Value >= 256 && Value <= 0xffff)
1791      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1792    else if (Value > 0xffff && Value <= 0xffffff)
1793      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1794    else if (Value > 0xffffff)
1795      Value = (Value >> 24) | 0x600;
1796    Inst.addOperand(MCOperand::CreateImm(Value));
1797  }
1798
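  // For the i64 splat below, each byte of the 64-bit value (expected to be
  // 0x00 or 0xff) contributes one bit of the 8-bit immediate, and 0x1e00
  // marks the operand as an i64 splat.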
1799  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1800    assert(N == 1 && "Invalid number of operands!");
1801    // The immediate encodes the type of constant as well as the value.
1802    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1803    uint64_t Value = CE->getValue();
1804    unsigned Imm = 0;
1805    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1806      Imm |= (Value & 1) << i;
1807    }
1808    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1809  }
1810
1811  virtual void print(raw_ostream &OS) const;
1812
1813  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1814    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1815    Op->ITMask.Mask = Mask;
1816    Op->StartLoc = S;
1817    Op->EndLoc = S;
1818    return Op;
1819  }
1820
1821  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1822    ARMOperand *Op = new ARMOperand(k_CondCode);
1823    Op->CC.Val = CC;
1824    Op->StartLoc = S;
1825    Op->EndLoc = S;
1826    return Op;
1827  }
1828
1829  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1830    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1831    Op->Cop.Val = CopVal;
1832    Op->StartLoc = S;
1833    Op->EndLoc = S;
1834    return Op;
1835  }
1836
1837  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1838    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1839    Op->Cop.Val = CopVal;
1840    Op->StartLoc = S;
1841    Op->EndLoc = S;
1842    return Op;
1843  }
1844
1845  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1846    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1847    Op->Cop.Val = Val;
1848    Op->StartLoc = S;
1849    Op->EndLoc = E;
1850    return Op;
1851  }
1852
1853  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1854    ARMOperand *Op = new ARMOperand(k_CCOut);
1855    Op->Reg.RegNum = RegNum;
1856    Op->StartLoc = S;
1857    Op->EndLoc = S;
1858    return Op;
1859  }
1860
1861  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1862    ARMOperand *Op = new ARMOperand(k_Token);
1863    Op->Tok.Data = Str.data();
1864    Op->Tok.Length = Str.size();
1865    Op->StartLoc = S;
1866    Op->EndLoc = S;
1867    return Op;
1868  }
1869
1870  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1871    ARMOperand *Op = new ARMOperand(k_Register);
1872    Op->Reg.RegNum = RegNum;
1873    Op->StartLoc = S;
1874    Op->EndLoc = E;
1875    return Op;
1876  }
1877
1878  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1879                                           unsigned SrcReg,
1880                                           unsigned ShiftReg,
1881                                           unsigned ShiftImm,
1882                                           SMLoc S, SMLoc E) {
1883    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1884    Op->RegShiftedReg.ShiftTy = ShTy;
1885    Op->RegShiftedReg.SrcReg = SrcReg;
1886    Op->RegShiftedReg.ShiftReg = ShiftReg;
1887    Op->RegShiftedReg.ShiftImm = ShiftImm;
1888    Op->StartLoc = S;
1889    Op->EndLoc = E;
1890    return Op;
1891  }
1892
1893  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1894                                            unsigned SrcReg,
1895                                            unsigned ShiftImm,
1896                                            SMLoc S, SMLoc E) {
1897    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1898    Op->RegShiftedImm.ShiftTy = ShTy;
1899    Op->RegShiftedImm.SrcReg = SrcReg;
1900    Op->RegShiftedImm.ShiftImm = ShiftImm;
1901    Op->StartLoc = S;
1902    Op->EndLoc = E;
1903    return Op;
1904  }
1905
1906  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1907                                   SMLoc S, SMLoc E) {
1908    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1909    Op->ShifterImm.isASR = isASR;
1910    Op->ShifterImm.Imm = Imm;
1911    Op->StartLoc = S;
1912    Op->EndLoc = E;
1913    return Op;
1914  }
1915
1916  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1917    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1918    Op->RotImm.Imm = Imm;
1919    Op->StartLoc = S;
1920    Op->EndLoc = E;
1921    return Op;
1922  }
1923
1924  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1925                                    SMLoc S, SMLoc E) {
1926    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1927    Op->Bitfield.LSB = LSB;
1928    Op->Bitfield.Width = Width;
1929    Op->StartLoc = S;
1930    Op->EndLoc = E;
1931    return Op;
1932  }
1933
1934  static ARMOperand *
1935  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1936                SMLoc StartLoc, SMLoc EndLoc) {
1937    KindTy Kind = k_RegisterList;
1938
1939    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1940      Kind = k_DPRRegisterList;
1941    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
1942             contains(Regs.front().first))
1943      Kind = k_SPRRegisterList;
1944
1945    ARMOperand *Op = new ARMOperand(Kind);
1946    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
1947           I = Regs.begin(), E = Regs.end(); I != E; ++I)
1948      Op->Registers.push_back(I->first);
1949    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
1950    Op->StartLoc = StartLoc;
1951    Op->EndLoc = EndLoc;
1952    return Op;
1953  }
1954
1955  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
1956                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
1957    ARMOperand *Op = new ARMOperand(k_VectorList);
1958    Op->VectorList.RegNum = RegNum;
1959    Op->VectorList.Count = Count;
1960    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
1961    Op->StartLoc = S;
1962    Op->EndLoc = E;
1963    return Op;
1964  }
1965
1966  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
1967                                              SMLoc S, SMLoc E) {
1968    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
1969    Op->VectorList.RegNum = RegNum;
1970    Op->VectorList.Count = Count;
1971    Op->StartLoc = S;
1972    Op->EndLoc = E;
1973    return Op;
1974  }
1975
1976  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
1977                                             unsigned Index, SMLoc S, SMLoc E) {
1978    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
1979    Op->VectorList.RegNum = RegNum;
1980    Op->VectorList.Count = Count;
1981    Op->VectorList.LaneIndex = Index;
1982    Op->StartLoc = S;
1983    Op->EndLoc = E;
1984    return Op;
1985  }
1986
1987  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1988                                       MCContext &Ctx) {
1989    ARMOperand *Op = new ARMOperand(k_VectorIndex);
1990    Op->VectorIndex.Val = Idx;
1991    Op->StartLoc = S;
1992    Op->EndLoc = E;
1993    return Op;
1994  }
1995
1996  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
1997    ARMOperand *Op = new ARMOperand(k_Immediate);
1998    Op->Imm.Val = Val;
1999    Op->StartLoc = S;
2000    Op->EndLoc = E;
2001    return Op;
2002  }
2003
2004  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2005    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2006    Op->FPImm.Val = Val;
2007    Op->StartLoc = S;
2008    Op->EndLoc = S;
2009    return Op;
2010  }
2011
2012  static ARMOperand *CreateMem(unsigned BaseRegNum,
2013                               const MCConstantExpr *OffsetImm,
2014                               unsigned OffsetRegNum,
2015                               ARM_AM::ShiftOpc ShiftType,
2016                               unsigned ShiftImm,
2017                               unsigned Alignment,
2018                               bool isNegative,
2019                               SMLoc S, SMLoc E) {
2020    ARMOperand *Op = new ARMOperand(k_Memory);
2021    Op->Memory.BaseRegNum = BaseRegNum;
2022    Op->Memory.OffsetImm = OffsetImm;
2023    Op->Memory.OffsetRegNum = OffsetRegNum;
2024    Op->Memory.ShiftType = ShiftType;
2025    Op->Memory.ShiftImm = ShiftImm;
2026    Op->Memory.Alignment = Alignment;
2027    Op->Memory.isNegative = isNegative;
2028    Op->StartLoc = S;
2029    Op->EndLoc = E;
2030    return Op;
2031  }
2032
2033  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2034                                      ARM_AM::ShiftOpc ShiftTy,
2035                                      unsigned ShiftImm,
2036                                      SMLoc S, SMLoc E) {
2037    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2038    Op->PostIdxReg.RegNum = RegNum;
2039    Op->PostIdxReg.isAdd = isAdd;
2040    Op->PostIdxReg.ShiftTy = ShiftTy;
2041    Op->PostIdxReg.ShiftImm = ShiftImm;
2042    Op->StartLoc = S;
2043    Op->EndLoc = E;
2044    return Op;
2045  }
2046
2047  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2048    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2049    Op->MBOpt.Val = Opt;
2050    Op->StartLoc = S;
2051    Op->EndLoc = S;
2052    return Op;
2053  }
2054
2055  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2056    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2057    Op->IFlags.Val = IFlags;
2058    Op->StartLoc = S;
2059    Op->EndLoc = S;
2060    return Op;
2061  }
2062
2063  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2064    ARMOperand *Op = new ARMOperand(k_MSRMask);
2065    Op->MMask.Val = MMask;
2066    Op->StartLoc = S;
2067    Op->EndLoc = S;
2068    return Op;
2069  }
2070};
2071
2072} // end anonymous namespace.
2073
2074void ARMOperand::print(raw_ostream &OS) const {
2075  switch (Kind) {
2076  case k_FPImmediate:
2077    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2078       << ") >";
2079    break;
2080  case k_CondCode:
2081    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2082    break;
2083  case k_CCOut:
2084    OS << "<ccout " << getReg() << ">";
2085    break;
2086  case k_ITCondMask: {
2087    static const char *MaskStr[] = {
2088      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2089      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2090    };
2091    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2092    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2093    break;
2094  }
2095  case k_CoprocNum:
2096    OS << "<coprocessor number: " << getCoproc() << ">";
2097    break;
2098  case k_CoprocReg:
2099    OS << "<coprocessor register: " << getCoproc() << ">";
2100    break;
2101  case k_CoprocOption:
2102    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2103    break;
2104  case k_MSRMask:
2105    OS << "<mask: " << getMSRMask() << ">";
2106    break;
2107  case k_Immediate:
2108    getImm()->print(OS);
2109    break;
2110  case k_MemBarrierOpt:
2111    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2112    break;
2113  case k_Memory:
2114    OS << "<memory "
2115       << "base:" << Memory.BaseRegNum;
2116    OS << ">";
2117    break;
2118  case k_PostIndexRegister:
2119    OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2120       << PostIdxReg.RegNum;
2121    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2122      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2123         << PostIdxReg.ShiftImm;
2124    OS << ">";
2125    break;
2126  case k_ProcIFlags: {
2127    OS << "<ARM_PROC::";
2128    unsigned IFlags = getProcIFlags();
2129    for (int i=2; i >= 0; --i)
2130      if (IFlags & (1 << i))
2131        OS << ARM_PROC::IFlagsToString(1 << i);
2132    OS << ">";
2133    break;
2134  }
2135  case k_Register:
2136    OS << "<register " << getReg() << ">";
2137    break;
2138  case k_ShifterImmediate:
2139    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2140       << " #" << ShifterImm.Imm << ">";
2141    break;
2142  case k_ShiftedRegister:
2143    OS << "<so_reg_reg "
2144       << RegShiftedReg.SrcReg << " "
2145       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2146       << " " << RegShiftedReg.ShiftReg << ">";
2147    break;
2148  case k_ShiftedImmediate:
2149    OS << "<so_reg_imm "
2150       << RegShiftedImm.SrcReg << " "
2151       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2152       << " #" << RegShiftedImm.ShiftImm << ">";
2153    break;
2154  case k_RotateImmediate:
2155    OS << "<ror #" << (RotImm.Imm * 8) << ">";
2156    break;
2157  case k_BitfieldDescriptor:
2158    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2159       << ", width: " << Bitfield.Width << ">";
2160    break;
2161  case k_RegisterList:
2162  case k_DPRRegisterList:
2163  case k_SPRRegisterList: {
2164    OS << "<register_list ";
2165
2166    const SmallVectorImpl<unsigned> &RegList = getRegList();
2167    for (SmallVectorImpl<unsigned>::const_iterator
2168           I = RegList.begin(), E = RegList.end(); I != E; ) {
2169      OS << *I;
2170      if (++I < E) OS << ", ";
2171    }
2172
2173    OS << ">";
2174    break;
2175  }
2176  case k_VectorList:
2177    OS << "<vector_list " << VectorList.Count << " * "
2178       << VectorList.RegNum << ">";
2179    break;
2180  case k_VectorListAllLanes:
2181    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2182       << VectorList.RegNum << ">";
2183    break;
2184  case k_VectorListIndexed:
2185    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2186       << VectorList.Count << " * " << VectorList.RegNum << ">";
2187    break;
2188  case k_Token:
2189    OS << "'" << getToken() << "'";
2190    break;
2191  case k_VectorIndex:
2192    OS << "<vectorindex " << getVectorIndex() << ">";
2193    break;
2194  }
2195}
2196
2197/// @name Auto-generated Match Functions
2198/// {
2199
2200static unsigned MatchRegisterName(StringRef Name);
2201
2202/// }
2203
2204bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2205                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2206  StartLoc = Parser.getTok().getLoc();
2207  RegNo = tryParseRegister();
2208  EndLoc = Parser.getTok().getLoc();
2209
2210  return (RegNo == (unsigned)-1);
2211}
2212
2213/// Try to parse a register name.  The token must be an Identifier when called,
2214/// and if it is a register name the token is eaten and the register number is
2215/// returned.  Otherwise return -1.
2216///
2217int ARMAsmParser::tryParseRegister() {
2218  const AsmToken &Tok = Parser.getTok();
2219  if (Tok.isNot(AsmToken::Identifier)) return -1;
2220
2221  std::string lowerCase = Tok.getString().lower();
2222  unsigned RegNum = MatchRegisterName(lowerCase);
2223  if (!RegNum) {
2224    RegNum = StringSwitch<unsigned>(lowerCase)
2225      .Case("r13", ARM::SP)
2226      .Case("r14", ARM::LR)
2227      .Case("r15", ARM::PC)
2228      .Case("ip", ARM::R12)
2229      // Additional register name aliases for 'gas' compatibility.
2230      .Case("a1", ARM::R0)
2231      .Case("a2", ARM::R1)
2232      .Case("a3", ARM::R2)
2233      .Case("a4", ARM::R3)
2234      .Case("v1", ARM::R4)
2235      .Case("v2", ARM::R5)
2236      .Case("v3", ARM::R6)
2237      .Case("v4", ARM::R7)
2238      .Case("v5", ARM::R8)
2239      .Case("v6", ARM::R9)
2240      .Case("v7", ARM::R10)
2241      .Case("v8", ARM::R11)
2242      .Case("sb", ARM::R9)
2243      .Case("sl", ARM::R10)
2244      .Case("fp", ARM::R11)
2245      .Default(0);
2246  }
2247  if (!RegNum) {
2248    // Check for aliases registered via .req.
2249    StringMap<unsigned>::const_iterator Entry =
2250      RegisterReqs.find(Tok.getIdentifier());
2251    // If no match, return failure.
2252    if (Entry == RegisterReqs.end())
2253      return -1;
2254    Parser.Lex(); // Eat identifier token.
2255    return Entry->getValue();
2256  }
2257
2258  Parser.Lex(); // Eat identifier token.
2259
2260  return RegNum;
2261}
2262
2263// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2264// If a recoverable error occurs, return 1. If an irrecoverable error
2265// occurs, return -1. An irrecoverable error is one where tokens have been
2266// consumed in the process of trying to parse the shifter (i.e., when it is
2267// indeed a shifter operand, but malformed).
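// Accepted forms include an immediate shift ("r1, lsl #3"), a register shift
// ("r1, asr r2"), and "rrx", which takes no shift amount.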
2268int ARMAsmParser::tryParseShiftRegister(
2269                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2270  SMLoc S = Parser.getTok().getLoc();
2271  const AsmToken &Tok = Parser.getTok();
2272  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2273
2274  std::string lowerCase = Tok.getString().lower();
2275  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2276      .Case("asl", ARM_AM::lsl)
2277      .Case("lsl", ARM_AM::lsl)
2278      .Case("lsr", ARM_AM::lsr)
2279      .Case("asr", ARM_AM::asr)
2280      .Case("ror", ARM_AM::ror)
2281      .Case("rrx", ARM_AM::rrx)
2282      .Default(ARM_AM::no_shift);
2283
2284  if (ShiftTy == ARM_AM::no_shift)
2285    return 1;
2286
2287  Parser.Lex(); // Eat the operator.
2288
2289  // The source register for the shift has already been added to the
2290  // operand list, so we need to pop it off and combine it into the shifted
2291  // register operand instead.
2292  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2293  if (!PrevOp->isReg())
2294    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2295  int SrcReg = PrevOp->getReg();
2296  int64_t Imm = 0;
2297  int ShiftReg = 0;
2298  if (ShiftTy == ARM_AM::rrx) {
2299    // RRX doesn't have an explicit shift amount. The encoder expects
2300    // the shift register to be the same as the source register. Seems odd,
2301    // but OK.
2302    ShiftReg = SrcReg;
2303  } else {
2304    // Figure out if this is shifted by a constant or a register (for non-RRX).
2305    if (Parser.getTok().is(AsmToken::Hash) ||
2306        Parser.getTok().is(AsmToken::Dollar)) {
2307      Parser.Lex(); // Eat hash.
2308      SMLoc ImmLoc = Parser.getTok().getLoc();
2309      const MCExpr *ShiftExpr = 0;
2310      if (getParser().ParseExpression(ShiftExpr)) {
2311        Error(ImmLoc, "invalid immediate shift value");
2312        return -1;
2313      }
2314      // The expression must be evaluatable as an immediate.
2315      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2316      if (!CE) {
2317        Error(ImmLoc, "invalid immediate shift value");
2318        return -1;
2319      }
2320      // Range check the immediate.
2321      // lsl, ror: 0 <= imm <= 31
2322      // lsr, asr: 0 <= imm <= 32
2323      Imm = CE->getValue();
2324      if (Imm < 0 ||
2325          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2326          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2327        Error(ImmLoc, "immediate shift value out of range");
2328        return -1;
2329      }
2330    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2331      ShiftReg = tryParseRegister();
2332      SMLoc L = Parser.getTok().getLoc();
2333      if (ShiftReg == -1) {
2334        Error(L, "expected immediate or register in shift operand");
2335        return -1;
2336      }
2337    } else {
2338      Error(Parser.getTok().getLoc(),
2339            "expected immediate or register in shift operand");
2340      return -1;
2341    }
2342  }
2343
2344  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2345    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2346                                                         ShiftReg, Imm,
2347                                               S, Parser.getTok().getLoc()));
2348  else
2349    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2350                                               S, Parser.getTok().getLoc()));
2351
2352  return 0;
2353}
2354
2355
2356/// Try to parse a register name.  The token must be an Identifier when called.
2357/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2358/// if there is a writeback suffix ('!'). Returns 'true' if it's not a register.
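/// For example, "r3!" yields a register operand followed by a "!" token
/// operand, and "d2[1]" yields a register operand followed by a vector index
/// operand.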
2359///
2360/// TODO this is likely to change to allow different register types and or to
2361/// parse for a specific register type.
2362bool ARMAsmParser::
2363tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2364  SMLoc S = Parser.getTok().getLoc();
2365  int RegNo = tryParseRegister();
2366  if (RegNo == -1)
2367    return true;
2368
2369  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2370
2371  const AsmToken &ExclaimTok = Parser.getTok();
2372  if (ExclaimTok.is(AsmToken::Exclaim)) {
2373    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2374                                               ExclaimTok.getLoc()));
2375    Parser.Lex(); // Eat exclaim token
2376    return false;
2377  }
2378
2379  // Also check for an index operand. This is only legal for vector registers,
2380  // but that'll get caught OK in operand matching, so we don't need to
2381  // explicitly filter everything else out here.
2382  if (Parser.getTok().is(AsmToken::LBrac)) {
2383    SMLoc SIdx = Parser.getTok().getLoc();
2384    Parser.Lex(); // Eat left bracket token.
2385
2386    const MCExpr *ImmVal;
2387    if (getParser().ParseExpression(ImmVal))
2388      return true;
2389    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2390    if (!MCE) {
2391      TokError("immediate value expected for vector index");
2392      return true;
2393    }
2394
2395    SMLoc E = Parser.getTok().getLoc();
2396    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2397      Error(E, "']' expected");
2398      return true;
2399    }
2400
2401    Parser.Lex(); // Eat right bracket token.
2402
2403    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2404                                                     SIdx, E,
2405                                                     getContext()));
2406  }
2407
2408  return false;
2409}
2410
2411/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2412/// instruction's symbolic operand name. Examples: "p1", "p7", "c3",
2413/// "c5", ...
2414static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2415  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2416  // but efficient.
2417  switch (Name.size()) {
2418  default: break;
2419  case 2:
2420    if (Name[0] != CoprocOp)
2421      return -1;
2422    switch (Name[1]) {
2423    default:  return -1;
2424    case '0': return 0;
2425    case '1': return 1;
2426    case '2': return 2;
2427    case '3': return 3;
2428    case '4': return 4;
2429    case '5': return 5;
2430    case '6': return 6;
2431    case '7': return 7;
2432    case '8': return 8;
2433    case '9': return 9;
2434    }
2435    break;
2436  case 3:
2437    if (Name[0] != CoprocOp || Name[1] != '1')
2438      return -1;
2439    switch (Name[2]) {
2440    default:  return -1;
2441    case '0': return 10;
2442    case '1': return 11;
2443    case '2': return 12;
2444    case '3': return 13;
2445    case '4': return 14;
2446    case '5': return 15;
2447    }
2448    break;
2449  }
2450
2451  return -1;
2452}
2453
2454/// parseITCondCode - Try to parse a condition code for an IT instruction.
2455ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2456parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2457  SMLoc S = Parser.getTok().getLoc();
2458  const AsmToken &Tok = Parser.getTok();
2459  if (!Tok.is(AsmToken::Identifier))
2460    return MatchOperand_NoMatch;
2461  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2462    .Case("eq", ARMCC::EQ)
2463    .Case("ne", ARMCC::NE)
2464    .Case("hs", ARMCC::HS)
2465    .Case("cs", ARMCC::HS)
2466    .Case("lo", ARMCC::LO)
2467    .Case("cc", ARMCC::LO)
2468    .Case("mi", ARMCC::MI)
2469    .Case("pl", ARMCC::PL)
2470    .Case("vs", ARMCC::VS)
2471    .Case("vc", ARMCC::VC)
2472    .Case("hi", ARMCC::HI)
2473    .Case("ls", ARMCC::LS)
2474    .Case("ge", ARMCC::GE)
2475    .Case("lt", ARMCC::LT)
2476    .Case("gt", ARMCC::GT)
2477    .Case("le", ARMCC::LE)
2478    .Case("al", ARMCC::AL)
2479    .Default(~0U);
2480  if (CC == ~0U)
2481    return MatchOperand_NoMatch;
2482  Parser.Lex(); // Eat the token.
2483
2484  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2485
2486  return MatchOperand_Success;
2487}
2488
2489/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2490/// token must be an Identifier when called, and if it is a coprocessor
2491/// number, the token is eaten and the operand is added to the operand list.
2492ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2493parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2494  SMLoc S = Parser.getTok().getLoc();
2495  const AsmToken &Tok = Parser.getTok();
2496  if (Tok.isNot(AsmToken::Identifier))
2497    return MatchOperand_NoMatch;
2498
2499  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2500  if (Num == -1)
2501    return MatchOperand_NoMatch;
2502
2503  Parser.Lex(); // Eat identifier token.
2504  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2505  return MatchOperand_Success;
2506}
2507
2508/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2509/// token must be an Identifier when called, and if it is a coprocessor
2510/// register, the token is eaten and the operand is added to the operand list.
2511ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2512parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2513  SMLoc S = Parser.getTok().getLoc();
2514  const AsmToken &Tok = Parser.getTok();
2515  if (Tok.isNot(AsmToken::Identifier))
2516    return MatchOperand_NoMatch;
2517
2518  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2519  if (Reg == -1)
2520    return MatchOperand_NoMatch;
2521
2522  Parser.Lex(); // Eat identifier token.
2523  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2524  return MatchOperand_Success;
2525}
2526
2527/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2528/// coproc_option : '{' imm0_255 '}'
2529ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2530parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2531  SMLoc S = Parser.getTok().getLoc();
2532
2533  // If this isn't a '{', this isn't a coprocessor immediate operand.
2534  if (Parser.getTok().isNot(AsmToken::LCurly))
2535    return MatchOperand_NoMatch;
2536  Parser.Lex(); // Eat the '{'
2537
2538  const MCExpr *Expr;
2539  SMLoc Loc = Parser.getTok().getLoc();
2540  if (getParser().ParseExpression(Expr)) {
2541    Error(Loc, "illegal expression");
2542    return MatchOperand_ParseFail;
2543  }
2544  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2545  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2546    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2547    return MatchOperand_ParseFail;
2548  }
2549  int Val = CE->getValue();
2550
2551  // Check for and consume the closing '}'
2552  if (Parser.getTok().isNot(AsmToken::RCurly))
2553    return MatchOperand_ParseFail;
2554  SMLoc E = Parser.getTok().getLoc();
2555  Parser.Lex(); // Eat the '}'
2556
2557  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2558  return MatchOperand_Success;
2559}
2560
2561// For register list parsing, we need to map from raw GPR register numbering
2562// to the enumeration values. The enumeration values aren't sorted by
2563// register number due to our using "sp", "lr" and "pc" as canonical names.
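// For example, the register after r12 is sp, and pc wraps back around to r0.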
2564static unsigned getNextRegister(unsigned Reg) {
2565  // If this is a GPR, we need to do it manually, otherwise we can rely
2566  // on the sort ordering of the enumeration since the other reg-classes
2567  // are sane.
2568  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2569    return Reg + 1;
2570  switch(Reg) {
2571  default: assert(0 && "Invalid GPR number!");
2572  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2573  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2574  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2575  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2576  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2577  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2578  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2579  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2580  }
2581}
2582
2583// Return the low-subreg of a given Q register.
2584static unsigned getDRegFromQReg(unsigned QReg) {
2585  switch (QReg) {
2586  default: llvm_unreachable("expected a Q register!");
2587  case ARM::Q0:  return ARM::D0;
2588  case ARM::Q1:  return ARM::D2;
2589  case ARM::Q2:  return ARM::D4;
2590  case ARM::Q3:  return ARM::D6;
2591  case ARM::Q4:  return ARM::D8;
2592  case ARM::Q5:  return ARM::D10;
2593  case ARM::Q6:  return ARM::D12;
2594  case ARM::Q7:  return ARM::D14;
2595  case ARM::Q8:  return ARM::D16;
2596  case ARM::Q9:  return ARM::D18;
2597  case ARM::Q10: return ARM::D20;
2598  case ARM::Q11: return ARM::D22;
2599  case ARM::Q12: return ARM::D24;
2600  case ARM::Q13: return ARM::D26;
2601  case ARM::Q14: return ARM::D28;
2602  case ARM::Q15: return ARM::D30;
2603  }
2604}
2605
2606/// Parse a register list.
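/// e.g. "{r0, r2-r5, lr}" or "{d0-d3}". A Q register is accepted and expanded
/// to its two D sub-registers.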
2607bool ARMAsmParser::
2608parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2609  assert(Parser.getTok().is(AsmToken::LCurly) &&
2610         "Token is not a Left Curly Brace");
2611  SMLoc S = Parser.getTok().getLoc();
2612  Parser.Lex(); // Eat '{' token.
2613  SMLoc RegLoc = Parser.getTok().getLoc();
2614
2615  // Check the first register in the list to see what register class
2616  // this is a list of.
2617  int Reg = tryParseRegister();
2618  if (Reg == -1)
2619    return Error(RegLoc, "register expected");
2620
2621  // The reglist instructions have at most 16 registers, so reserve
2622  // space for that many.
2623  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2624
2625  // Allow Q regs and just interpret them as the two D sub-registers.
2626  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2627    Reg = getDRegFromQReg(Reg);
2628    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2629    ++Reg;
2630  }
2631  const MCRegisterClass *RC;
2632  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2633    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2634  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2635    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2636  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2637    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2638  else
2639    return Error(RegLoc, "invalid register in register list");
2640
2641  // Store the register.
2642  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2643
2644  // This starts immediately after the first register token in the list,
2645  // so we can see either a comma or a minus (range separator) as a legal
2646  // next token.
2647  while (Parser.getTok().is(AsmToken::Comma) ||
2648         Parser.getTok().is(AsmToken::Minus)) {
2649    if (Parser.getTok().is(AsmToken::Minus)) {
2650      Parser.Lex(); // Eat the minus.
2651      SMLoc EndLoc = Parser.getTok().getLoc();
2652      int EndReg = tryParseRegister();
2653      if (EndReg == -1)
2654        return Error(EndLoc, "register expected");
2655      // Allow Q regs and just interpret them as the two D sub-registers.
2656      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2657        EndReg = getDRegFromQReg(EndReg) + 1;
2658      // If the register is the same as the start reg, there's nothing
2659      // more to do.
2660      if (Reg == EndReg)
2661        continue;
2662      // The register must be in the same register class as the first.
2663      if (!RC->contains(EndReg))
2664        return Error(EndLoc, "invalid register in register list");
2665      // Ranges must go from low to high.
2666      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2667        return Error(EndLoc, "bad range in register list");
2668
2669      // Add all the registers in the range to the register list.
2670      while (Reg != EndReg) {
2671        Reg = getNextRegister(Reg);
2672        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2673      }
2674      continue;
2675    }
2676    Parser.Lex(); // Eat the comma.
2677    RegLoc = Parser.getTok().getLoc();
2678    int OldReg = Reg;
2679    const AsmToken RegTok = Parser.getTok();
2680    Reg = tryParseRegister();
2681    if (Reg == -1)
2682      return Error(RegLoc, "register expected");
2683    // Allow Q regs and just interpret them as the two D sub-registers.
2684    bool isQReg = false;
2685    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2686      Reg = getDRegFromQReg(Reg);
2687      isQReg = true;
2688    }
2689    // The register must be in the same register class as the first.
2690    if (!RC->contains(Reg))
2691      return Error(RegLoc, "invalid register in register list");
2692    // List must be monotonically increasing.
2693    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2694      return Error(RegLoc, "register list not in ascending order");
2695    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2696      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2697              ") in register list");
2698      continue;
2699    }
2700    // VFP register lists must also be contiguous.
2701    // It's OK to use the enumeration values directly here, as the
2702    // VFP register classes have the enum sorted properly.
2703    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2704        Reg != OldReg + 1)
2705      return Error(RegLoc, "non-contiguous register range");
2706    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2707    if (isQReg)
2708      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2709  }
2710
2711  SMLoc E = Parser.getTok().getLoc();
2712  if (Parser.getTok().isNot(AsmToken::RCurly))
2713    return Error(E, "'}' expected");
2714  Parser.Lex(); // Eat '}' token.
2715
2716  // Push the register list operand.
2717  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2718
2719  // The ARM system instruction variants for LDM/STM have a '^' token here.
2720  if (Parser.getTok().is(AsmToken::Caret)) {
2721    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2722    Parser.Lex(); // Eat '^' token.
2723  }
2724
2725  return false;
2726}
2727
2728// Helper function to parse the lane index for vector lists.
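// Accepts "[]" for the all-lanes form, "[<n>]" for a single lane, or nothing
// at all (NoLanes) when no '[' follows the register.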
2729ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2730parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2731  Index = 0; // Always return a defined index value.
2732  if (Parser.getTok().is(AsmToken::LBrac)) {
2733    Parser.Lex(); // Eat the '['.
2734    if (Parser.getTok().is(AsmToken::RBrac)) {
2735      // "Dn[]" is the 'all lanes' syntax.
2736      LaneKind = AllLanes;
2737      Parser.Lex(); // Eat the ']'.
2738      return MatchOperand_Success;
2739    }
2740    if (Parser.getTok().is(AsmToken::Integer)) {
2741      int64_t Val = Parser.getTok().getIntVal();
2742      // FIXME: Make this range check context sensitive for .8, .16, .32.
2743      if (Val < 0 || Val > 7)
2744        Error(Parser.getTok().getLoc(), "lane index out of range");
2745      Index = Val;
2746      LaneKind = IndexedLane;
2747      Parser.Lex(); // Eat the token.
2748      if (Parser.getTok().isNot(AsmToken::RBrac))
2749        Error(Parser.getTok().getLoc(), "']' expected");
2750      Parser.Lex(); // Eat the ']'.
2751      return MatchOperand_Success;
2752    }
2753    Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2754    return MatchOperand_ParseFail;
2755  }
2756  LaneKind = NoLanes;
2757  return MatchOperand_Success;
2758}
2759
2760// Parse a vector register list.
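// e.g. "{d0, d1, d2}", "{d0-d3}", "{d0[], d1[]}", or "{d0[2], d1[2]}"; a bare
// "d1" or "q2" is also accepted as a one- or two-register list.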
2761ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2762parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2763  VectorLaneTy LaneKind;
2764  unsigned LaneIndex;
2765  SMLoc S = Parser.getTok().getLoc();
2766  // As an extension (to match gas), support a plain D register or Q register
2767// (without enclosing curly braces) as a single- or double-entry list,
2768  // respectively.
2769  if (Parser.getTok().is(AsmToken::Identifier)) {
2770    int Reg = tryParseRegister();
2771    if (Reg == -1)
2772      return MatchOperand_NoMatch;
2773    SMLoc E = Parser.getTok().getLoc();
2774    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2775      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2776      if (Res != MatchOperand_Success)
2777        return Res;
2778      switch (LaneKind) {
2779      default:
2780        assert(0 && "unexpected lane kind!");
2781      case NoLanes:
2782        E = Parser.getTok().getLoc();
2783        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2784        break;
2785      case AllLanes:
2786        E = Parser.getTok().getLoc();
2787        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
2788        break;
2789      case IndexedLane:
2790        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2791                                                               LaneIndex, S,E));
2792        break;
2793      }
2794      return MatchOperand_Success;
2795    }
2796    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2797      Reg = getDRegFromQReg(Reg);
2798      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2799      if (Res != MatchOperand_Success)
2800        return Res;
2801      switch (LaneKind) {
2802      default:
2803        assert(0 && "unexpected lane kind!");
2804      case NoLanes:
2805        E = Parser.getTok().getLoc();
2806        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2807        break;
2808      case AllLanes:
2809        E = Parser.getTok().getLoc();
2810        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
2811        break;
2812      case IndexedLane:
2813        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2814                                                               LaneIndex, S,E));
2815        break;
2816      }
2817      return MatchOperand_Success;
2818    }
2819    Error(S, "vector register expected");
2820    return MatchOperand_ParseFail;
2821  }
2822
2823  if (Parser.getTok().isNot(AsmToken::LCurly))
2824    return MatchOperand_NoMatch;
2825
2826  Parser.Lex(); // Eat '{' token.
2827  SMLoc RegLoc = Parser.getTok().getLoc();
2828
2829  int Reg = tryParseRegister();
2830  if (Reg == -1) {
2831    Error(RegLoc, "register expected");
2832    return MatchOperand_ParseFail;
2833  }
2834  unsigned Count = 1;
2835  int Spacing = 0;
2836  unsigned FirstReg = Reg;
2837  // The list is of D registers, but we also allow Q regs and just interpret
2838  // them as the two D sub-registers.
2839  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2840    FirstReg = Reg = getDRegFromQReg(Reg);
2841    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2842                 // it's ambiguous with four-register single spaced.
2843    ++Reg;
2844    ++Count;
2845  }
2846  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2847    return MatchOperand_ParseFail;
2848
2849  while (Parser.getTok().is(AsmToken::Comma) ||
2850         Parser.getTok().is(AsmToken::Minus)) {
2851    if (Parser.getTok().is(AsmToken::Minus)) {
2852      if (!Spacing)
2853        Spacing = 1; // Register range implies a single spaced list.
2854      else if (Spacing == 2) {
2855        Error(Parser.getTok().getLoc(),
2856              "sequential registers in double spaced list");
2857        return MatchOperand_ParseFail;
2858      }
2859      Parser.Lex(); // Eat the minus.
2860      SMLoc EndLoc = Parser.getTok().getLoc();
2861      int EndReg = tryParseRegister();
2862      if (EndReg == -1) {
2863        Error(EndLoc, "register expected");
2864        return MatchOperand_ParseFail;
2865      }
2866      // Allow Q regs and just interpret them as the two D sub-registers.
2867      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2868        EndReg = getDRegFromQReg(EndReg) + 1;
2869      // If the register is the same as the start reg, there's nothing
2870      // more to do.
2871      if (Reg == EndReg)
2872        continue;
2873      // The register must be in the same register class as the first.
2874      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2875        Error(EndLoc, "invalid register in register list");
2876        return MatchOperand_ParseFail;
2877      }
2878      // Ranges must go from low to high.
2879      if (Reg > EndReg) {
2880        Error(EndLoc, "bad range in register list");
2881        return MatchOperand_ParseFail;
2882      }
2883      // Parse the lane specifier if present.
2884      VectorLaneTy NextLaneKind;
2885      unsigned NextLaneIndex;
2886      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2887        return MatchOperand_ParseFail;
2888      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2889        Error(EndLoc, "mismatched lane index in register list");
2890        return MatchOperand_ParseFail;
2891      }
2892      EndLoc = Parser.getTok().getLoc();
2893
2894      // Add all the registers in the range to the register list.
2895      Count += EndReg - Reg;
2896      Reg = EndReg;
2897      continue;
2898    }
2899    Parser.Lex(); // Eat the comma.
2900    RegLoc = Parser.getTok().getLoc();
2901    int OldReg = Reg;
2902    Reg = tryParseRegister();
2903    if (Reg == -1) {
2904      Error(RegLoc, "register expected");
2905      return MatchOperand_ParseFail;
2906    }
2907    // Vector register lists must be contiguous.
2908    // It's OK to use the enumeration values directly here, as the
2909    // VFP register classes have the enum sorted properly.
2910    //
2911    // The list is of D registers, but we also allow Q regs and just interpret
2912    // them as the two D sub-registers.
2913    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2914      if (!Spacing)
2915        Spacing = 1; // Register range implies a single spaced list.
2916      else if (Spacing == 2) {
2917        Error(RegLoc,
2918              "invalid register in double-spaced list (must be 'D' register')");
2919        return MatchOperand_ParseFail;
2920      }
2921      Reg = getDRegFromQReg(Reg);
2922      if (Reg != OldReg + 1) {
2923        Error(RegLoc, "non-contiguous register range");
2924        return MatchOperand_ParseFail;
2925      }
2926      ++Reg;
2927      Count += 2;
2928      // Parse the lane specifier if present.
2929      VectorLaneTy NextLaneKind;
2930      unsigned NextLaneIndex;
2931      SMLoc EndLoc = Parser.getTok().getLoc();
2932      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2933        return MatchOperand_ParseFail;
2934      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2935        Error(EndLoc, "mismatched lane index in register list");
2936        return MatchOperand_ParseFail;
2937      }
2938      continue;
2939    }
2940    // Normal D register.
2941    // Figure out the register spacing (single or double) of the list if
2942    // we don't know it already.
2943    if (!Spacing)
2944      Spacing = 1 + (Reg == OldReg + 2);
2945
2946    // Just check that it's contiguous and keep going.
2947    if (Reg != OldReg + Spacing) {
2948      Error(RegLoc, "non-contiguous register range");
2949      return MatchOperand_ParseFail;
2950    }
2951    ++Count;
2952    // Parse the lane specifier if present.
2953    VectorLaneTy NextLaneKind;
2954    unsigned NextLaneIndex;
2955    SMLoc EndLoc = Parser.getTok().getLoc();
2956    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2957      return MatchOperand_ParseFail;
2958    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2959      Error(EndLoc, "mismatched lane index in register list");
2960      return MatchOperand_ParseFail;
2961    }
2962    if (Spacing == 2 && LaneKind != NoLanes) {
2963      Error(EndLoc,
2964            "lane index specfier invalid in double spaced register list");
2965      return MatchOperand_ParseFail;
2966    }
2967  }
2968
2969  SMLoc E = Parser.getTok().getLoc();
2970  if (Parser.getTok().isNot(AsmToken::RCurly)) {
2971    Error(E, "'}' expected");
2972    return MatchOperand_ParseFail;
2973  }
2974  Parser.Lex(); // Eat '}' token.
2975
2976  switch (LaneKind) {
2977  default:
2978    assert(0 && "unexpected lane kind in register list.");
2979  case NoLanes:
2980    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
2981                                                    (Spacing == 2), S, E));
2982    break;
2983  case AllLanes:
2984    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
2985                                                            S, E));
2986    break;
2987  case IndexedLane:
2988    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
2989                                                           LaneIndex, S, E));
2990    break;
2991  }
2992  return MatchOperand_Success;
2993}
2994
2995/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
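/// For example: "dmb ish" or "dsb sy"; the option identifier after the
/// mnemonic is mapped to an ARM_MB encoding below.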
2996ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2997parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2998  SMLoc S = Parser.getTok().getLoc();
2999  const AsmToken &Tok = Parser.getTok();
3000  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3001  StringRef OptStr = Tok.getString();
3002
3003  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3004    .Case("sy",    ARM_MB::SY)
3005    .Case("st",    ARM_MB::ST)
3006    .Case("sh",    ARM_MB::ISH)
3007    .Case("ish",   ARM_MB::ISH)
3008    .Case("shst",  ARM_MB::ISHST)
3009    .Case("ishst", ARM_MB::ISHST)
3010    .Case("nsh",   ARM_MB::NSH)
3011    .Case("un",    ARM_MB::NSH)
3012    .Case("nshst", ARM_MB::NSHST)
3013    .Case("unst",  ARM_MB::NSHST)
3014    .Case("osh",   ARM_MB::OSH)
3015    .Case("oshst", ARM_MB::OSHST)
3016    .Default(~0U);
3017
3018  if (Opt == ~0U)
3019    return MatchOperand_NoMatch;
3020
3021  Parser.Lex(); // Eat identifier token.
3022  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3023  return MatchOperand_Success;
3024}
3025
3026/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
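/// For example: "cpsid if" or "cpsie a"; each of the letters a, i and f sets
/// one bit of the resulting ARM_PROC::IFlags value, and "none" sets no bits.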
3027ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3028parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3029  SMLoc S = Parser.getTok().getLoc();
3030  const AsmToken &Tok = Parser.getTok();
3031  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3032  StringRef IFlagsStr = Tok.getString();
3033
3034  // An iflags string of "none" is interpreted to mean that none of the AIF
3035  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3036  unsigned IFlags = 0;
3037  if (IFlagsStr != "none") {
3038    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3039      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3040        .Case("a", ARM_PROC::A)
3041        .Case("i", ARM_PROC::I)
3042        .Case("f", ARM_PROC::F)
3043        .Default(~0U);
3044
3045      // If some specific iflag is already set, it means that some letter is
3046      // present more than once, this is not acceptable.
3047      if (Flag == ~0U || (IFlags & Flag))
3048        return MatchOperand_NoMatch;
3049
3050      IFlags |= Flag;
3051    }
3052  }
3053
3054  Parser.Lex(); // Eat identifier token.
3055  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3056  return MatchOperand_Success;
3057}
3058
3059/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
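/// For example: "msr apsr_nzcvq, r0" or "msr spsr_fc, r1" on A/R profiles,
/// and "msr primask, r0" on M-class cores (illustrative examples).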
3060ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3061parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3062  SMLoc S = Parser.getTok().getLoc();
3063  const AsmToken &Tok = Parser.getTok();
3064  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3065  StringRef Mask = Tok.getString();
3066
3067  if (isMClass()) {
3068    // See ARMv6-M 10.1.1
3069    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3070      .Case("apsr", 0)
3071      .Case("iapsr", 1)
3072      .Case("eapsr", 2)
3073      .Case("xpsr", 3)
3074      .Case("ipsr", 5)
3075      .Case("epsr", 6)
3076      .Case("iepsr", 7)
3077      .Case("msp", 8)
3078      .Case("psp", 9)
3079      .Case("primask", 16)
3080      .Case("basepri", 17)
3081      .Case("basepri_max", 18)
3082      .Case("faultmask", 19)
3083      .Case("control", 20)
3084      .Default(~0U);
3085
3086    if (FlagsVal == ~0U)
3087      return MatchOperand_NoMatch;
3088
3089    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3090      // basepri, basepri_max and faultmask only valid for V7m.
3091      return MatchOperand_NoMatch;
3092
3093    Parser.Lex(); // Eat identifier token.
3094    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3095    return MatchOperand_Success;
3096  }
3097
3098  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3099  size_t Start = 0, Next = Mask.find('_');
3100  StringRef Flags = "";
3101  std::string SpecReg = Mask.slice(Start, Next).lower();
3102  if (Next != StringRef::npos)
3103    Flags = Mask.slice(Next+1, Mask.size());
3104
3105  // FlagsVal contains the complete mask:
3106  // 3-0: Mask
3107  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3108  unsigned FlagsVal = 0;
3109
3110  if (SpecReg == "apsr") {
3111    FlagsVal = StringSwitch<unsigned>(Flags)
3112    .Case("nzcvq",  0x8) // same as CPSR_f
3113    .Case("g",      0x4) // same as CPSR_s
3114    .Case("nzcvqg", 0xc) // same as CPSR_fs
3115    .Default(~0U);
3116
3117    if (FlagsVal == ~0U) {
3118      if (!Flags.empty())
3119        return MatchOperand_NoMatch;
3120      else
3121        FlagsVal = 8; // No flag
3122    }
3123  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3124    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3125      Flags = "fc";
3126    for (int i = 0, e = Flags.size(); i != e; ++i) {
3127      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3128      .Case("c", 1)
3129      .Case("x", 2)
3130      .Case("s", 4)
3131      .Case("f", 8)
3132      .Default(~0U);
3133
3134      // If some specific flag is already set, it means that some letter is
3135      // present more than once, this is not acceptable.
3136      if (FlagsVal == ~0U || (FlagsVal & Flag))
3137        return MatchOperand_NoMatch;
3138      FlagsVal |= Flag;
3139    }
3140  } else // No match for special register.
3141    return MatchOperand_NoMatch;
3142
3143  // Special register without flags is NOT equivalent to "fc" flags.
3144  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3145  // two lines would enable gas compatibility at the expense of breaking
3146  // round-tripping.
3147  //
3148  // if (!FlagsVal)
3149  //  FlagsVal = 0x9;
3150
3151  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3152  if (SpecReg == "spsr")
3153    FlagsVal |= 16;
3154
3155  Parser.Lex(); // Eat identifier token.
3156  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3157  return MatchOperand_Success;
3158}
3159
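// parsePKHImm handles the shift operand of PKHBT/PKHTB, e.g. (illustrative)
// "pkhbt r0, r1, r2, lsl #8", where Op names the expected shift keyword and
// [Low,High] bounds the immediate.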
3160ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3161parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3162            int Low, int High) {
3163  const AsmToken &Tok = Parser.getTok();
3164  if (Tok.isNot(AsmToken::Identifier)) {
3165    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3166    return MatchOperand_ParseFail;
3167  }
3168  StringRef ShiftName = Tok.getString();
3169  std::string LowerOp = Op.lower();
3170  std::string UpperOp = Op.upper();
3171  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3172    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3173    return MatchOperand_ParseFail;
3174  }
3175  Parser.Lex(); // Eat shift type token.
3176
3177  // There must be a '#' and a shift amount.
3178  if (Parser.getTok().isNot(AsmToken::Hash) &&
3179      Parser.getTok().isNot(AsmToken::Dollar)) {
3180    Error(Parser.getTok().getLoc(), "'#' expected");
3181    return MatchOperand_ParseFail;
3182  }
3183  Parser.Lex(); // Eat hash token.
3184
3185  const MCExpr *ShiftAmount;
3186  SMLoc Loc = Parser.getTok().getLoc();
3187  if (getParser().ParseExpression(ShiftAmount)) {
3188    Error(Loc, "illegal expression");
3189    return MatchOperand_ParseFail;
3190  }
3191  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3192  if (!CE) {
3193    Error(Loc, "constant expression expected");
3194    return MatchOperand_ParseFail;
3195  }
3196  int Val = CE->getValue();
3197  if (Val < Low || Val > High) {
3198    Error(Loc, "immediate value out of range");
3199    return MatchOperand_ParseFail;
3200  }
3201
3202  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3203
3204  return MatchOperand_Success;
3205}
3206
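// parseSetEndImm handles the endianness operand of SETEND, e.g. "setend be"
// or "setend le".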
3207ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3208parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3209  const AsmToken &Tok = Parser.getTok();
3210  SMLoc S = Tok.getLoc();
3211  if (Tok.isNot(AsmToken::Identifier)) {
3212    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3213    return MatchOperand_ParseFail;
3214  }
3215  int Val = StringSwitch<int>(Tok.getString())
3216    .Case("be", 1)
3217    .Case("le", 0)
3218    .Default(-1);
3219  Parser.Lex(); // Eat the token.
3220
3221  if (Val == -1) {
3222    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3223    return MatchOperand_ParseFail;
3224  }
3225  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3226                                                                  getContext()),
3227                                           S, Parser.getTok().getLoc()));
3228  return MatchOperand_Success;
3229}
3230
3231/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3232/// instructions. Legal values are:
3233///     lsl #n  'n' in [0,31]
3234///     asr #n  'n' in [1,32]
3235///             n == 32 encoded as n == 0.
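/// For example: "ssat r0, #8, r1, lsl #4" or "usat r0, #7, r1, asr #2"
/// (illustrative examples).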
3236ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3237parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3238  const AsmToken &Tok = Parser.getTok();
3239  SMLoc S = Tok.getLoc();
3240  if (Tok.isNot(AsmToken::Identifier)) {
3241    Error(S, "shift operator 'asr' or 'lsl' expected");
3242    return MatchOperand_ParseFail;
3243  }
3244  StringRef ShiftName = Tok.getString();
3245  bool isASR;
3246  if (ShiftName == "lsl" || ShiftName == "LSL")
3247    isASR = false;
3248  else if (ShiftName == "asr" || ShiftName == "ASR")
3249    isASR = true;
3250  else {
3251    Error(S, "shift operator 'asr' or 'lsl' expected");
3252    return MatchOperand_ParseFail;
3253  }
3254  Parser.Lex(); // Eat the operator.
3255
3256  // A '#' and a shift amount.
3257  if (Parser.getTok().isNot(AsmToken::Hash) &&
3258      Parser.getTok().isNot(AsmToken::Dollar)) {
3259    Error(Parser.getTok().getLoc(), "'#' expected");
3260    return MatchOperand_ParseFail;
3261  }
3262  Parser.Lex(); // Eat hash token.
3263
3264  const MCExpr *ShiftAmount;
3265  SMLoc E = Parser.getTok().getLoc();
3266  if (getParser().ParseExpression(ShiftAmount)) {
3267    Error(E, "malformed shift expression");
3268    return MatchOperand_ParseFail;
3269  }
3270  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3271  if (!CE) {
3272    Error(E, "shift amount must be an immediate");
3273    return MatchOperand_ParseFail;
3274  }
3275
3276  int64_t Val = CE->getValue();
3277  if (isASR) {
3278    // Shift amount must be in [1,32]
3279    if (Val < 1 || Val > 32) {
3280      Error(E, "'asr' shift amount must be in range [1,32]");
3281      return MatchOperand_ParseFail;
3282    }
3283    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3284    if (isThumb() && Val == 32) {
3285      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3286      return MatchOperand_ParseFail;
3287    }
3288    if (Val == 32) Val = 0;
3289  } else {
3290    // Shift amount must be in [0,31]
3291    if (Val < 0 || Val > 31) {
3292      Error(E, "'lsl' shift amount must be in range [0,31]");
3293      return MatchOperand_ParseFail;
3294    }
3295  }
3296
3297  E = Parser.getTok().getLoc();
3298  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3299
3300  return MatchOperand_Success;
3301}
3302
3303/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3304/// of instructions. Legal values are:
3305///     ror #n  'n' in {0, 8, 16, 24}
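/// For example: "sxtb r0, r1, ror #8" or "uxth r2, r3, ror #16"
/// (illustrative examples).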
3306ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3307parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3308  const AsmToken &Tok = Parser.getTok();
3309  SMLoc S = Tok.getLoc();
3310  if (Tok.isNot(AsmToken::Identifier))
3311    return MatchOperand_NoMatch;
3312  StringRef ShiftName = Tok.getString();
3313  if (ShiftName != "ror" && ShiftName != "ROR")
3314    return MatchOperand_NoMatch;
3315  Parser.Lex(); // Eat the operator.
3316
3317  // A '#' and a rotate amount.
3318  if (Parser.getTok().isNot(AsmToken::Hash) &&
3319      Parser.getTok().isNot(AsmToken::Dollar)) {
3320    Error(Parser.getTok().getLoc(), "'#' expected");
3321    return MatchOperand_ParseFail;
3322  }
3323  Parser.Lex(); // Eat hash token.
3324
3325  const MCExpr *ShiftAmount;
3326  SMLoc E = Parser.getTok().getLoc();
3327  if (getParser().ParseExpression(ShiftAmount)) {
3328    Error(E, "malformed rotate expression");
3329    return MatchOperand_ParseFail;
3330  }
3331  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3332  if (!CE) {
3333    Error(E, "rotate amount must be an immediate");
3334    return MatchOperand_ParseFail;
3335  }
3336
3337  int64_t Val = CE->getValue();
3338  // Shift amount must be in {0, 8, 16, 24}. Zero is an undocumented
3339  // extension; normally it is represented in asm by omitting the rotate
3340  // operand entirely.
3341  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3342    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3343    return MatchOperand_ParseFail;
3344  }
3345
3346  E = Parser.getTok().getLoc();
3347  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3348
3349  return MatchOperand_Success;
3350}
3351
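// parseBitfield handles the lsb/width operand pair of BFI/BFC/SBFX/UBFX,
// e.g. (illustrative) "bfi r0, r1, #8, #4" with lsb == 8 and width == 4.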
3352ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3353parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3354  SMLoc S = Parser.getTok().getLoc();
3355  // The bitfield descriptor is really two operands, the LSB and the width.
3356  if (Parser.getTok().isNot(AsmToken::Hash) &&
3357      Parser.getTok().isNot(AsmToken::Dollar)) {
3358    Error(Parser.getTok().getLoc(), "'#' expected");
3359    return MatchOperand_ParseFail;
3360  }
3361  Parser.Lex(); // Eat hash token.
3362
3363  const MCExpr *LSBExpr;
3364  SMLoc E = Parser.getTok().getLoc();
3365  if (getParser().ParseExpression(LSBExpr)) {
3366    Error(E, "malformed immediate expression");
3367    return MatchOperand_ParseFail;
3368  }
3369  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3370  if (!CE) {
3371    Error(E, "'lsb' operand must be an immediate");
3372    return MatchOperand_ParseFail;
3373  }
3374
3375  int64_t LSB = CE->getValue();
3376  // The LSB must be in the range [0,31]
3377  if (LSB < 0 || LSB > 31) {
3378    Error(E, "'lsb' operand must be in the range [0,31]");
3379    return MatchOperand_ParseFail;
3380  }
3381  E = Parser.getTok().getLoc();
3382
3383  // Expect another immediate operand.
3384  if (Parser.getTok().isNot(AsmToken::Comma)) {
3385    Error(Parser.getTok().getLoc(), "too few operands");
3386    return MatchOperand_ParseFail;
3387  }
3388  Parser.Lex(); // Eat the comma.
3389  if (Parser.getTok().isNot(AsmToken::Hash) &&
3390      Parser.getTok().isNot(AsmToken::Dollar)) {
3391    Error(Parser.getTok().getLoc(), "'#' expected");
3392    return MatchOperand_ParseFail;
3393  }
3394  Parser.Lex(); // Eat hash token.
3395
3396  const MCExpr *WidthExpr;
3397  if (getParser().ParseExpression(WidthExpr)) {
3398    Error(E, "malformed immediate expression");
3399    return MatchOperand_ParseFail;
3400  }
3401  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3402  if (!CE) {
3403    Error(E, "'width' operand must be an immediate");
3404    return MatchOperand_ParseFail;
3405  }
3406
3407  int64_t Width = CE->getValue();
3408  // The width must be in the range [1,32-lsb]
3409  if (Width < 1 || Width > 32 - LSB) {
3410    Error(E, "'width' operand must be in the range [1,32-lsb]");
3411    return MatchOperand_ParseFail;
3412  }
3413  E = Parser.getTok().getLoc();
3414
3415  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3416
3417  return MatchOperand_Success;
3418}
3419
3420ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3421parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3422  // Check for a post-index addressing register operand. Specifically:
3423  // postidx_reg := '+' register {, shift}
3424  //              | '-' register {, shift}
3425  //              | register {, shift}
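  // For example (illustrative), the trailing operands of "ldr r0, [r1], r2"
  // and "ldr r0, [r1], -r2, lsl #2" are parsed here.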
3426
3427  // This method must return MatchOperand_NoMatch without consuming any tokens
3428  // in the case where there is no match, as other alternatives take other
3429  // parse methods.
3430  AsmToken Tok = Parser.getTok();
3431  SMLoc S = Tok.getLoc();
3432  bool haveEaten = false;
3433  bool isAdd = true;
3434  int Reg = -1;
3435  if (Tok.is(AsmToken::Plus)) {
3436    Parser.Lex(); // Eat the '+' token.
3437    haveEaten = true;
3438  } else if (Tok.is(AsmToken::Minus)) {
3439    Parser.Lex(); // Eat the '-' token.
3440    isAdd = false;
3441    haveEaten = true;
3442  }
3443  if (Parser.getTok().is(AsmToken::Identifier))
3444    Reg = tryParseRegister();
3445  if (Reg == -1) {
3446    if (!haveEaten)
3447      return MatchOperand_NoMatch;
3448    Error(Parser.getTok().getLoc(), "register expected");
3449    return MatchOperand_ParseFail;
3450  }
3451  SMLoc E = Parser.getTok().getLoc();
3452
3453  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3454  unsigned ShiftImm = 0;
3455  if (Parser.getTok().is(AsmToken::Comma)) {
3456    Parser.Lex(); // Eat the ','.
3457    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3458      return MatchOperand_ParseFail;
3459  }
3460
3461  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3462                                                  ShiftImm, S, E));
3463
3464  return MatchOperand_Success;
3465}
3466
3467ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3468parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3469  // Check for a post-index addressing register operand. Specifically:
3470  // am3offset := '+' register
3471  //              | '-' register
3472  //              | register
3473  //              | # imm
3474  //              | # + imm
3475  //              | # - imm
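  // For example (illustrative), "ldrd r0, r1, [r2], #8" and "ldrh r0, [r1], r2"
  // end with an am3offset operand.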
3476
3477  // This method must return MatchOperand_NoMatch without consuming any tokens
3478  // in the case where there is no match, as other alternatives take other
3479  // parse methods.
3480  AsmToken Tok = Parser.getTok();
3481  SMLoc S = Tok.getLoc();
3482
3483  // Do immediates first, as we always parse those if we have a '#'.
3484  if (Parser.getTok().is(AsmToken::Hash) ||
3485      Parser.getTok().is(AsmToken::Dollar)) {
3486    Parser.Lex(); // Eat the '#'.
3487    // Explicitly look for a '-', as we need to encode negative zero
3488    // differently.
3489    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3490    const MCExpr *Offset;
3491    if (getParser().ParseExpression(Offset))
3492      return MatchOperand_ParseFail;
3493    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3494    if (!CE) {
3495      Error(S, "constant expression expected");
3496      return MatchOperand_ParseFail;
3497    }
3498    SMLoc E = Tok.getLoc();
3499    // Negative zero is encoded as the flag value INT32_MIN.
3500    int32_t Val = CE->getValue();
3501    if (isNegative && Val == 0)
3502      Val = INT32_MIN;
3503
3504    Operands.push_back(
3505      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3506
3507    return MatchOperand_Success;
3508  }
3509
3510
3511  bool haveEaten = false;
3512  bool isAdd = true;
3513  int Reg = -1;
3514  if (Tok.is(AsmToken::Plus)) {
3515    Parser.Lex(); // Eat the '+' token.
3516    haveEaten = true;
3517  } else if (Tok.is(AsmToken::Minus)) {
3518    Parser.Lex(); // Eat the '-' token.
3519    isAdd = false;
3520    haveEaten = true;
3521  }
3522  if (Parser.getTok().is(AsmToken::Identifier))
3523    Reg = tryParseRegister();
3524  if (Reg == -1) {
3525    if (!haveEaten)
3526      return MatchOperand_NoMatch;
3527    Error(Parser.getTok().getLoc(), "register expected");
3528    return MatchOperand_ParseFail;
3529  }
3530  SMLoc E = Parser.getTok().getLoc();
3531
3532  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3533                                                  0, S, E));
3534
3535  return MatchOperand_Success;
3536}
3537
3538/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3539/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3540/// when they refer multiple MIOperands inside a single one.
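/// For example (illustrative), "ldrd r0, r1, [r2, #8]!" parses into the
/// operand list { "ldrd", pred, Rt, Rt2, addr }; this hook re-emits them as
/// Rt, Rt2, writeback, addr, pred to match the MCInst operand order.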
3541bool ARMAsmParser::
3542cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3543             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3544  // Rt, Rt2
3545  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3546  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3547  // Create a writeback register dummy placeholder.
3548  Inst.addOperand(MCOperand::CreateReg(0));
3549  // addr
3550  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3551  // pred
3552  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3553  return true;
3554}
3555
3556/// cvtT2StrdPre - Convert parsed operands to MCInst.
3557/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3558/// when they refer multiple MIOperands inside a single one.
3559bool ARMAsmParser::
3560cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3561             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3562  // Create a writeback register dummy placeholder.
3563  Inst.addOperand(MCOperand::CreateReg(0));
3564  // Rt, Rt2
3565  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3566  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3567  // addr
3568  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3569  // pred
3570  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3571  return true;
3572}
3573
3574/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3575/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3576/// when they refer multiple MIOperands inside a single one.
3577bool ARMAsmParser::
3578cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3579                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3580  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3581
3582  // Create a writeback register dummy placeholder.
3583  Inst.addOperand(MCOperand::CreateImm(0));
3584
3585  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3586  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3587  return true;
3588}
3589
3590/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3591/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3592/// when they refer multiple MIOperands inside a single one.
3593bool ARMAsmParser::
3594cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3595                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3596  // Create a writeback register dummy placeholder.
3597  Inst.addOperand(MCOperand::CreateImm(0));
3598  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3599  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3600  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3601  return true;
3602}
3603
3604/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3605/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3606/// when they refer multiple MIOperands inside a single one.
3607bool ARMAsmParser::
3608cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3609                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3610  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3611
3612  // Create a writeback register dummy placeholder.
3613  Inst.addOperand(MCOperand::CreateImm(0));
3614
3615  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3616  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3617  return true;
3618}
3619
3620/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3621/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3622/// when they refer multiple MIOperands inside a single one.
3623bool ARMAsmParser::
3624cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3625                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3626  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3627
3628  // Create a writeback register dummy placeholder.
3629  Inst.addOperand(MCOperand::CreateImm(0));
3630
3631  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3632  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3633  return true;
3634}
3635
3636
3637/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3638/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3639/// when they refer multiple MIOperands inside a single one.
3640bool ARMAsmParser::
3641cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3642                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3643  // Create a writeback register dummy placeholder.
3644  Inst.addOperand(MCOperand::CreateImm(0));
3645  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3646  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3647  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3648  return true;
3649}
3650
3651/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3652/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3653/// when they refer multiple MIOperands inside a single one.
3654bool ARMAsmParser::
3655cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3656                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3657  // Create a writeback register dummy placeholder.
3658  Inst.addOperand(MCOperand::CreateImm(0));
3659  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3660  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3661  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3662  return true;
3663}
3664
3665/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3666/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3667/// when they refer multiple MIOperands inside a single one.
3668bool ARMAsmParser::
3669cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3670                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3671  // Create a writeback register dummy placeholder.
3672  Inst.addOperand(MCOperand::CreateImm(0));
3673  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3674  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3675  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3676  return true;
3677}
3678
3679/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3680/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3681/// when they refer multiple MIOperands inside a single one.
3682bool ARMAsmParser::
3683cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3684                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3685  // Rt
3686  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3687  // Create a writeback register dummy placeholder.
3688  Inst.addOperand(MCOperand::CreateImm(0));
3689  // addr
3690  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3691  // offset
3692  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3693  // pred
3694  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3695  return true;
3696}
3697
3698/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3699/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3700/// when they refer multiple MIOperands inside a single one.
3701bool ARMAsmParser::
3702cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3703                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3704  // Rt
3705  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3706  // Create a writeback register dummy placeholder.
3707  Inst.addOperand(MCOperand::CreateImm(0));
3708  // addr
3709  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3710  // offset
3711  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3712  // pred
3713  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3714  return true;
3715}
3716
3717/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3718/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3719/// when they refer multiple MIOperands inside a single one.
3720bool ARMAsmParser::
3721cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3722                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3723  // Create a writeback register dummy placeholder.
3724  Inst.addOperand(MCOperand::CreateImm(0));
3725  // Rt
3726  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3727  // addr
3728  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3729  // offset
3730  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3731  // pred
3732  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3733  return true;
3734}
3735
3736/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3737/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3738/// when they refer multiple MIOperands inside a single one.
3739bool ARMAsmParser::
3740cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3741                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3742  // Create a writeback register dummy placeholder.
3743  Inst.addOperand(MCOperand::CreateImm(0));
3744  // Rt
3745  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3746  // addr
3747  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3748  // offset
3749  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3750  // pred
3751  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3752  return true;
3753}
3754
3755/// cvtLdrdPre - Convert parsed operands to MCInst.
3756/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3757/// when they refer multiple MIOperands inside a single one.
3758bool ARMAsmParser::
3759cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3760           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3761  // Rt, Rt2
3762  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3763  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3764  // Create a writeback register dummy placeholder.
3765  Inst.addOperand(MCOperand::CreateImm(0));
3766  // addr
3767  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3768  // pred
3769  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3770  return true;
3771}
3772
3773/// cvtStrdPre - Convert parsed operands to MCInst.
3774/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3775/// when they refer multiple MIOperands inside a single one.
3776bool ARMAsmParser::
3777cvtStrdPre(MCInst &Inst, unsigned Opcode,
3778           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3779  // Create a writeback register dummy placeholder.
3780  Inst.addOperand(MCOperand::CreateImm(0));
3781  // Rt, Rt2
3782  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3783  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3784  // addr
3785  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3786  // pred
3787  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3788  return true;
3789}
3790
3791/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3792/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3793/// when they refer multiple MIOperands inside a single one.
3794bool ARMAsmParser::
3795cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3796                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3797  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3798  // Create a writeback register dummy placeholder.
3799  Inst.addOperand(MCOperand::CreateImm(0));
3800  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3801  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3802  return true;
3803}
3804
3805/// cvtThumbMultiply - Convert parsed operands to MCInst.
3806/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3807/// when they refer multiple MIOperands inside a single one.
3808bool ARMAsmParser::
3809cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3810           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3811  // The second source operand must be the same register as the destination
3812  // operand.
3813  if (Operands.size() == 6 &&
3814      (((ARMOperand*)Operands[3])->getReg() !=
3815       ((ARMOperand*)Operands[5])->getReg()) &&
3816      (((ARMOperand*)Operands[3])->getReg() !=
3817       ((ARMOperand*)Operands[4])->getReg())) {
3818    Error(Operands[3]->getStartLoc(),
3819          "destination register must match source register");
3820    return false;
3821  }
3822  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3823  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3824  // If we have a three-operand form, make sure to set Rn to be the operand
3825  // that isn't the same as Rd.
3826  unsigned RegOp = 4;
3827  if (Operands.size() == 6 &&
3828      ((ARMOperand*)Operands[4])->getReg() ==
3829        ((ARMOperand*)Operands[3])->getReg())
3830    RegOp = 5;
3831  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3832  Inst.addOperand(Inst.getOperand(0));
3833  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3834
3835  return true;
3836}
3837
3838bool ARMAsmParser::
3839cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3840              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3841  // Vd
3842  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3843  // Create a writeback register dummy placeholder.
3844  Inst.addOperand(MCOperand::CreateImm(0));
3845  // Vn
3846  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3847  // pred
3848  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3849  return true;
3850}
3851
3852bool ARMAsmParser::
3853cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3854                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3855  // Vd
3856  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3857  // Create a writeback register dummy placeholder.
3858  Inst.addOperand(MCOperand::CreateImm(0));
3859  // Vn
3860  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3861  // Vm
3862  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3863  // pred
3864  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3865  return true;
3866}
3867
3868bool ARMAsmParser::
3869cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3870              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3871  // Create a writeback register dummy placeholder.
3872  Inst.addOperand(MCOperand::CreateImm(0));
3873  // Vn
3874  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3875  // Vt
3876  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3877  // pred
3878  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3879  return true;
3880}
3881
3882bool ARMAsmParser::
3883cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3884                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3885  // Create a writeback register dummy placeholder.
3886  Inst.addOperand(MCOperand::CreateImm(0));
3887  // Vn
3888  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3889  // Vm
3890  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3891  // Vt
3892  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3893  // pred
3894  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3895  return true;
3896}
3897
3898/// Parse an ARM memory expression. Return false if successful, otherwise
3899/// return true and report an error.  The first token must be a '[' when called.
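/// Handles forms such as "[r0]", "[r0, #4]!", "[r0, r1, lsl #2]" and the
/// alignment syntax "[r0, :128]" (illustrative examples).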
3900bool ARMAsmParser::
3901parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3902  SMLoc S, E;
3903  assert(Parser.getTok().is(AsmToken::LBrac) &&
3904         "Token is not a Left Bracket");
3905  S = Parser.getTok().getLoc();
3906  Parser.Lex(); // Eat left bracket token.
3907
3908  const AsmToken &BaseRegTok = Parser.getTok();
3909  int BaseRegNum = tryParseRegister();
3910  if (BaseRegNum == -1)
3911    return Error(BaseRegTok.getLoc(), "register expected");
3912
3913  // The next token must either be a comma or a closing bracket.
3914  const AsmToken &Tok = Parser.getTok();
3915  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3916    return Error(Tok.getLoc(), "malformed memory operand");
3917
3918  if (Tok.is(AsmToken::RBrac)) {
3919    E = Tok.getLoc();
3920    Parser.Lex(); // Eat right bracket token.
3921
3922    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3923                                             0, 0, false, S, E));
3924
3925    // If there's a pre-indexing writeback marker, '!', just add it as a token
3926    // operand. It's rather odd, but syntactically valid.
3927    if (Parser.getTok().is(AsmToken::Exclaim)) {
3928      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3929      Parser.Lex(); // Eat the '!'.
3930    }
3931
3932    return false;
3933  }
3934
3935  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
3936  Parser.Lex(); // Eat the comma.
3937
3938  // If we have a ':', it's an alignment specifier.
3939  if (Parser.getTok().is(AsmToken::Colon)) {
3940    Parser.Lex(); // Eat the ':'.
3941    E = Parser.getTok().getLoc();
3942
3943    const MCExpr *Expr;
3944    if (getParser().ParseExpression(Expr))
3945     return true;
3946
3947    // The expression has to be a constant. Memory references with relocations
3948    // don't come through here, as they use the <label> forms of the relevant
3949    // instructions.
3950    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3951    if (!CE)
3952      return Error (E, "constant expression expected");
3953
3954    unsigned Align = 0;
3955    switch (CE->getValue()) {
3956    default:
3957      return Error(E,
3958                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
3959    case 16:  Align = 2; break;
3960    case 32:  Align = 4; break;
3961    case 64:  Align = 8; break;
3962    case 128: Align = 16; break;
3963    case 256: Align = 32; break;
3964    }
3965
3966    // Now we should have the closing ']'
3967    E = Parser.getTok().getLoc();
3968    if (Parser.getTok().isNot(AsmToken::RBrac))
3969      return Error(E, "']' expected");
3970    Parser.Lex(); // Eat right bracket token.
3971
3972    // Don't worry about range checking the value here. That's handled by
3973    // the is*() predicates.
3974    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
3975                                             ARM_AM::no_shift, 0, Align,
3976                                             false, S, E));
3977
3978    // If there's a pre-indexing writeback marker, '!', just add it as a token
3979    // operand.
3980    if (Parser.getTok().is(AsmToken::Exclaim)) {
3981      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3982      Parser.Lex(); // Eat the '!'.
3983    }
3984
3985    return false;
3986  }
3987
3988  // If we have a '#', it's an immediate offset, else assume it's a register
3989  // offset. Be friendly and also accept a plain integer (without a leading
3990  // hash) for gas compatibility.
3991  if (Parser.getTok().is(AsmToken::Hash) ||
3992      Parser.getTok().is(AsmToken::Dollar) ||
3993      Parser.getTok().is(AsmToken::Integer)) {
3994    if (Parser.getTok().isNot(AsmToken::Integer))
3995      Parser.Lex(); // Eat the '#'.
3996    E = Parser.getTok().getLoc();
3997
3998    bool isNegative = getParser().getTok().is(AsmToken::Minus);
3999    const MCExpr *Offset;
4000    if (getParser().ParseExpression(Offset))
4001     return true;
4002
4003    // The expression has to be a constant. Memory references with relocations
4004    // don't come through here, as they use the <label> forms of the relevant
4005    // instructions.
4006    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4007    if (!CE)
4008      return Error (E, "constant expression expected");
4009
4010    // If the constant was #-0, represent it as INT32_MIN.
4011    int32_t Val = CE->getValue();
4012    if (isNegative && Val == 0)
4013      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4014
4015    // Now we should have the closing ']'
4016    E = Parser.getTok().getLoc();
4017    if (Parser.getTok().isNot(AsmToken::RBrac))
4018      return Error(E, "']' expected");
4019    Parser.Lex(); // Eat right bracket token.
4020
4021    // Don't worry about range checking the value here. That's handled by
4022    // the is*() predicates.
4023    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4024                                             ARM_AM::no_shift, 0, 0,
4025                                             false, S, E));
4026
4027    // If there's a pre-indexing writeback marker, '!', just add it as a token
4028    // operand.
4029    if (Parser.getTok().is(AsmToken::Exclaim)) {
4030      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4031      Parser.Lex(); // Eat the '!'.
4032    }
4033
4034    return false;
4035  }
4036
4037  // The register offset is optionally preceded by a '+' or '-'
4038  bool isNegative = false;
4039  if (Parser.getTok().is(AsmToken::Minus)) {
4040    isNegative = true;
4041    Parser.Lex(); // Eat the '-'.
4042  } else if (Parser.getTok().is(AsmToken::Plus)) {
4043    // Nothing to do.
4044    Parser.Lex(); // Eat the '+'.
4045  }
4046
4047  E = Parser.getTok().getLoc();
4048  int OffsetRegNum = tryParseRegister();
4049  if (OffsetRegNum == -1)
4050    return Error(E, "register expected");
4051
4052  // If there's a shift operator, handle it.
4053  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4054  unsigned ShiftImm = 0;
4055  if (Parser.getTok().is(AsmToken::Comma)) {
4056    Parser.Lex(); // Eat the ','.
4057    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4058      return true;
4059  }
4060
4061  // Now we should have the closing ']'
4062  E = Parser.getTok().getLoc();
4063  if (Parser.getTok().isNot(AsmToken::RBrac))
4064    return Error(E, "']' expected");
4065  Parser.Lex(); // Eat right bracket token.
4066
4067  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4068                                           ShiftType, ShiftImm, 0, isNegative,
4069                                           S, E));
4070
4071  // If there's a pre-indexing writeback marker, '!', just add it as a token
4072  // operand.
4073  if (Parser.getTok().is(AsmToken::Exclaim)) {
4074    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4075    Parser.Lex(); // Eat the '!'.
4076  }
4077
4078  return false;
4079}
4080
4081/// parseMemRegOffsetShift - one of these two:
4082///   ( lsl | lsr | asr | ror ) , # shift_amount
4083///   rrx
4084/// return true if it parses a shift otherwise it returns false.
4085bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4086                                          unsigned &Amount) {
4087  SMLoc Loc = Parser.getTok().getLoc();
4088  const AsmToken &Tok = Parser.getTok();
4089  if (Tok.isNot(AsmToken::Identifier))
4090    return true;
4091  StringRef ShiftName = Tok.getString();
4092  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4093      ShiftName == "asl" || ShiftName == "ASL")
4094    St = ARM_AM::lsl;
4095  else if (ShiftName == "lsr" || ShiftName == "LSR")
4096    St = ARM_AM::lsr;
4097  else if (ShiftName == "asr" || ShiftName == "ASR")
4098    St = ARM_AM::asr;
4099  else if (ShiftName == "ror" || ShiftName == "ROR")
4100    St = ARM_AM::ror;
4101  else if (ShiftName == "rrx" || ShiftName == "RRX")
4102    St = ARM_AM::rrx;
4103  else
4104    return Error(Loc, "illegal shift operator");
4105  Parser.Lex(); // Eat shift type token.
4106
4107  // rrx stands alone.
4108  Amount = 0;
4109  if (St != ARM_AM::rrx) {
4110    Loc = Parser.getTok().getLoc();
4111    // A '#' and a shift amount.
4112    const AsmToken &HashTok = Parser.getTok();
4113    if (HashTok.isNot(AsmToken::Hash) &&
4114        HashTok.isNot(AsmToken::Dollar))
4115      return Error(HashTok.getLoc(), "'#' expected");
4116    Parser.Lex(); // Eat hash token.
4117
4118    const MCExpr *Expr;
4119    if (getParser().ParseExpression(Expr))
4120      return true;
4121    // Range check the immediate.
4122    // lsl, ror: 0 <= imm <= 31
4123    // lsr, asr: 0 <= imm <= 32
4124    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4125    if (!CE)
4126      return Error(Loc, "shift amount must be an immediate");
4127    int64_t Imm = CE->getValue();
4128    if (Imm < 0 ||
4129        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4130        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4131      return Error(Loc, "immediate shift value out of range");
4132    Amount = Imm;
4133  }
4134
4135  return false;
4136}
4137
4138/// parseFPImm - A floating point immediate expression operand.
4139ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4140parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4141  SMLoc S = Parser.getTok().getLoc();
4142
4143  if (Parser.getTok().isNot(AsmToken::Hash) &&
4144      Parser.getTok().isNot(AsmToken::Dollar))
4145    return MatchOperand_NoMatch;
4146
4147  // Disambiguate the VMOV forms that can accept an FP immediate.
4148  // vmov.f32 <sreg>, #imm
4149  // vmov.f64 <dreg>, #imm
4150  // vmov.f32 <dreg>, #imm  @ vector f32x2
4151  // vmov.f32 <qreg>, #imm  @ vector f32x4
4152  //
4153  // There are also the NEON VMOV instructions which expect an
4154  // integer constant. Make sure we don't try to parse an FPImm
4155  // for these:
4156  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4157  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4158  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4159                           TyOp->getToken() != ".f64"))
4160    return MatchOperand_NoMatch;
4161
4162  Parser.Lex(); // Eat the '#'.
4163
4164  // Handle negation, as that still comes through as a separate token.
4165  bool isNegative = false;
4166  if (Parser.getTok().is(AsmToken::Minus)) {
4167    isNegative = true;
4168    Parser.Lex();
4169  }
4170  const AsmToken &Tok = Parser.getTok();
4171  if (Tok.is(AsmToken::Real)) {
4172    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4173    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4174    // If we had a '-' in front, toggle the sign bit.
4175    IntVal ^= (uint64_t)isNegative << 63;
4176    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4177    Parser.Lex(); // Eat the token.
4178    if (Val == -1) {
4179      TokError("floating point value out of range");
4180      return MatchOperand_ParseFail;
4181    }
4182    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4183    return MatchOperand_Success;
4184  }
4185  if (Tok.is(AsmToken::Integer)) {
4186    int64_t Val = Tok.getIntVal();
4187    Parser.Lex(); // Eat the token.
4188    if (Val > 255 || Val < 0) {
4189      TokError("encoded floating point value out of range");
4190      return MatchOperand_ParseFail;
4191    }
4192    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4193    return MatchOperand_Success;
4194  }
4195
4196  TokError("invalid floating point immediate");
4197  return MatchOperand_ParseFail;
4198}
4199/// Parse an ARM instruction operand.  For now this parses the operand regardless
4200/// of the mnemonic.
4201bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4202                                StringRef Mnemonic) {
4203  SMLoc S, E;
4204
4205  // Check if the current operand has a custom associated parser, if so, try to
4206  // custom parse the operand, or fallback to the general approach.
4207  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4208  if (ResTy == MatchOperand_Success)
4209    return false;
4210  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4211  // there was a match, but an error occurred, in which case, just return that
4212  // the operand parsing failed.
4213  if (ResTy == MatchOperand_ParseFail)
4214    return true;
4215
4216  switch (getLexer().getKind()) {
4217  default:
4218    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4219    return true;
4220  case AsmToken::Identifier: {
4221    // If this is VMRS, check for the apsr_nzcv operand.
4222    if (!tryParseRegisterWithWriteBack(Operands))
4223      return false;
4224    int Res = tryParseShiftRegister(Operands);
4225    if (Res == 0) // success
4226      return false;
4227    else if (Res == -1) // irrecoverable error
4228      return true;
4229    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4230      S = Parser.getTok().getLoc();
4231      Parser.Lex();
4232      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4233      return false;
4234    }
4235
4236    // Fall through for the Identifier case that is not a register or a
4237    // special name.
4238  }
4239  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4240  case AsmToken::Integer: // things like 1f and 2b as branch targets
4241  case AsmToken::String:  // quoted label names.
4242  case AsmToken::Dot: {   // . as a branch target
4243    // This was not a register so parse other operands that start with an
4244    // identifier (like labels) as expressions and create them as immediates.
4245    const MCExpr *IdVal;
4246    S = Parser.getTok().getLoc();
4247    if (getParser().ParseExpression(IdVal))
4248      return true;
4249    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4250    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4251    return false;
4252  }
4253  case AsmToken::LBrac:
4254    return parseMemory(Operands);
4255  case AsmToken::LCurly:
4256    return parseRegisterList(Operands);
4257  case AsmToken::Dollar:
4258  case AsmToken::Hash: {
4259    // #42 -> immediate.
4260    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4261    S = Parser.getTok().getLoc();
4262    Parser.Lex();
4263    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4264    const MCExpr *ImmVal;
4265    if (getParser().ParseExpression(ImmVal))
4266      return true;
4267    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4268    if (CE) {
4269      int32_t Val = CE->getValue();
4270      if (isNegative && Val == 0)
4271        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4272    }
4273    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4274    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4275    return false;
4276  }
4277  case AsmToken::Colon: {
4278    // ":lower16:" and ":upper16:" expression prefixes
4279    // FIXME: Check it's an expression prefix,
4280    // e.g. (FOO - :lower16:BAR) isn't legal.
4281    ARMMCExpr::VariantKind RefKind;
4282    if (parsePrefix(RefKind))
4283      return true;
4284
4285    const MCExpr *SubExprVal;
4286    if (getParser().ParseExpression(SubExprVal))
4287      return true;
4288
4289    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4290                                                   getContext());
4291    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4292    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4293    return false;
4294  }
4295  }
4296}
4297
4298// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4299//  :lower16: and :upper16:.
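// For example (illustrative): "movw r0, :lower16:_foo" and
// "movt r0, :upper16:_foo" carry these prefixes on their immediate operands.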
4300bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4301  RefKind = ARMMCExpr::VK_ARM_None;
4302
4303  // :lower16: and :upper16: modifiers
4304  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4305  Parser.Lex(); // Eat ':'
4306
4307  if (getLexer().isNot(AsmToken::Identifier)) {
4308    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4309    return true;
4310  }
4311
4312  StringRef IDVal = Parser.getTok().getIdentifier();
4313  if (IDVal == "lower16") {
4314    RefKind = ARMMCExpr::VK_ARM_LO16;
4315  } else if (IDVal == "upper16") {
4316    RefKind = ARMMCExpr::VK_ARM_HI16;
4317  } else {
4318    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4319    return true;
4320  }
4321  Parser.Lex();
4322
4323  if (getLexer().isNot(AsmToken::Colon)) {
4324    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4325    return true;
4326  }
4327  Parser.Lex(); // Eat the last ':'
4328  return false;
4329}
4330
4331/// \brief Given a mnemonic, split out possible predication code and carry
4332/// setting letters to form a canonical mnemonic and flags.
4333//
4334// FIXME: Would be nice to autogen this.
4335// FIXME: This is a bit of a maze of special cases.
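// For example (illustrative): "addseq" splits into mnemonic "add" with the
// carry-setting 's' and predication code EQ; "cpsie" splits into "cps" plus
// the IE imod; "itte" yields "it" with condition-mask letters "te".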
4336StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4337                                      unsigned &PredicationCode,
4338                                      bool &CarrySetting,
4339                                      unsigned &ProcessorIMod,
4340                                      StringRef &ITMask) {
4341  PredicationCode = ARMCC::AL;
4342  CarrySetting = false;
4343  ProcessorIMod = 0;
4344
4345  // Ignore some mnemonics we know aren't predicated forms.
4346  //
4347  // FIXME: Would be nice to autogen this.
4348  if ((Mnemonic == "movs" && isThumb()) ||
4349      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4350      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4351      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4352      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4353      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4354      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4355      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
4356    return Mnemonic;
4357
4358  // First, split out any predication code. Ignore mnemonics we know aren't
4359  // predicated but do have a carry-set and so weren't caught above.
4360  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4361      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4362      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4363      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4364    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4365      .Case("eq", ARMCC::EQ)
4366      .Case("ne", ARMCC::NE)
4367      .Case("hs", ARMCC::HS)
4368      .Case("cs", ARMCC::HS)
4369      .Case("lo", ARMCC::LO)
4370      .Case("cc", ARMCC::LO)
4371      .Case("mi", ARMCC::MI)
4372      .Case("pl", ARMCC::PL)
4373      .Case("vs", ARMCC::VS)
4374      .Case("vc", ARMCC::VC)
4375      .Case("hi", ARMCC::HI)
4376      .Case("ls", ARMCC::LS)
4377      .Case("ge", ARMCC::GE)
4378      .Case("lt", ARMCC::LT)
4379      .Case("gt", ARMCC::GT)
4380      .Case("le", ARMCC::LE)
4381      .Case("al", ARMCC::AL)
4382      .Default(~0U);
4383    if (CC != ~0U) {
4384      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4385      PredicationCode = CC;
4386    }
4387  }
4388
4389  // Next, determine if we have a carry setting bit. We explicitly ignore all
4390  // the instructions we know end in 's'.
4391  if (Mnemonic.endswith("s") &&
4392      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4393        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4394        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4395        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4396        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4397        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4398        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4399        (Mnemonic == "movs" && isThumb()))) {
4400    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4401    CarrySetting = true;
4402  }
4403
4404  // The "cps" instruction can have an interrupt mode operand glued into the
4405  // mnemonic. Check if this is the case, split it out, and parse the imod op.
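  // For example, "cpsie" is split into Mnemonic == "cps" with
  // ProcessorIMod == ARM_PROC::IE.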
4406  if (Mnemonic.startswith("cps")) {
4407    // Split out any imod code.
4408    unsigned IMod =
4409      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4410      .Case("ie", ARM_PROC::IE)
4411      .Case("id", ARM_PROC::ID)
4412      .Default(~0U);
4413    if (IMod != ~0U) {
4414      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4415      ProcessorIMod = IMod;
4416    }
4417  }
4418
4419  // The "it" instruction has the condition mask on the end of the mnemonic.
4420  if (Mnemonic.startswith("it")) {
4421    ITMask = Mnemonic.slice(2, Mnemonic.size());
4422    Mnemonic = Mnemonic.slice(0, 2);
4423  }
4424
4425  return Mnemonic;
4426}
4427
4428/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4429/// inclusion of carry set or predication code operands.
4430//
4431// FIXME: It would be nice to autogen this.
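// For example, "add" can both set flags (an "adds" form exists) and be
// predicated, whereas "cbz" accepts neither an 's' suffix nor a condition code.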
4432void ARMAsmParser::
4433getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4434                      bool &CanAcceptPredicationCode) {
4435  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4436      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4437      Mnemonic == "add" || Mnemonic == "adc" ||
4438      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4439      Mnemonic == "orr" || Mnemonic == "mvn" ||
4440      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4441      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4442      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4443                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4444                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4445    CanAcceptCarrySet = true;
4446  } else
4447    CanAcceptCarrySet = false;
4448
4449  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4450      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4451      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4452      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4453      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4454      (Mnemonic == "clrex" && !isThumb()) ||
4455      (Mnemonic == "nop" && isThumbOne()) ||
4456      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4457        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4458        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4459      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4460       !isThumb()) ||
4461      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4462    CanAcceptPredicationCode = false;
4463  } else
4464    CanAcceptPredicationCode = true;
4465
4466  if (isThumb()) {
4467    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4468        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4469      CanAcceptPredicationCode = false;
4470  }
4471}
4472
4473bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4474                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4475  // FIXME: This is all horribly hacky. We really need a better way to deal
4476  // with optional operands like this in the matcher table.
4477
4478  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4479  // another does not. Specifically, the MOVW instruction does not. So we
4480  // special case it here and remove the defaulted (non-setting) cc_out
4481  // operand if that's the instruction we're trying to match.
4482  //
4483  // We do this as post-processing of the explicit operands rather than just
4484  // conditionally adding the cc_out in the first place because we need
4485  // to check the type of the parsed immediate operand.
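  // For example, in ARM mode "mov r0, #0x1234" can only match the MOVW
  // encoding (0x1234 fits imm0_65535 but is not a modified immediate), so the
  // defaulted cc_out operand is removed here.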
4486  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4487      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4488      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4489      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4490    return true;
4491
4492  // Register-register 'add' for thumb does not have a cc_out operand
4493  // when there are only two register operands.
4494  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4495      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4496      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4497      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4498    return true;
4499  // Register-register 'add' for thumb does not have a cc_out operand
4500  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4501  // have to check the immediate range here since Thumb2 has a variant
4502  // that can handle a different range and has a cc_out operand.
4503  if (((isThumb() && Mnemonic == "add") ||
4504       (isThumbTwo() && Mnemonic == "sub")) &&
4505      Operands.size() == 6 &&
4506      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4507      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4508      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4509      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4510      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4511       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4512    return true;
4513  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4514  // imm0_4095 variant. That's the least-preferred variant when
4515  // selecting via the generic "add" mnemonic, so to know that we
4516  // should remove the cc_out operand, we have to explicitly check that
4517  // it's not one of the other variants. Ugh.
4518  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4519      Operands.size() == 6 &&
4520      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4521      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4522      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4523    // Nest conditions rather than one big 'if' statement for readability.
4524    //
4525    // If either register is a high reg, it's either one of the SP
4526    // variants (handled above) or a 32-bit encoding, so we just
4527    // check against T3.
4528    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4529         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4530        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4531      return false;
4532    // If both registers are low, we're in an IT block, and the immediate is
4533    // in range, we should use encoding T1 instead, which has a cc_out.
4534    if (inITBlock() &&
4535        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4536        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4537        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4538      return false;
4539
4540    // Otherwise, we use encoding T4, which does not have a cc_out
4541    // operand.
4542    return true;
4543  }
4544
4545  // The thumb2 multiply instruction doesn't have a CCOut register, so
4546  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4547  // use the 16-bit encoding or not.
4548  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4549      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4550      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4551      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4552      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4553      // If the registers aren't low regs, the destination reg isn't the
4554      // same as one of the source regs, or the cc_out operand is zero
4555      // outside of an IT block, we have to use the 32-bit encoding, so
4556      // remove the cc_out operand.
4557      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4558       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4559       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4560       !inITBlock() ||
4561       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4562        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4563        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4564        static_cast<ARMOperand*>(Operands[4])->getReg())))
4565    return true;
4566
4567  // Also check the 'mul' syntax variant that doesn't specify an explicit
4568  // destination register.
4569  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4570      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4571      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4572      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4573      // If the registers aren't low regs or the cc_out operand is zero
4574      // outside of an IT block, we have to use the 32-bit encoding, so
4575      // remove the cc_out operand.
4576      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4577       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4578       !inITBlock()))
4579    return true;
4580
4581
4582
4583  // Register-register 'add/sub' for thumb does not have a cc_out operand
4584  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4585  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4586  // right, this will result in better diagnostics (which operand is off)
4587  // anyway.
4588  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4589      (Operands.size() == 5 || Operands.size() == 6) &&
4590      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4591      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4592      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4593    return true;
4594
4595  return false;
4596}
4597
4598static bool isDataTypeToken(StringRef Tok) {
4599  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4600    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4601    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4602    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4603    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4604    Tok == ".f" || Tok == ".d";
4605}
4606
4607// FIXME: This bit should probably be handled via an explicit match class
4608// in the .td files that matches the suffix instead of having it be
4609// a literal string token the way it is now.
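// For example, a datatype suffix such as ".32" on "vldm" or "vstm" is simply
// dropped by ParseInstruction below instead of being emitted as a token.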
4610static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4611  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4612}
4613
4614static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4615/// Parse an ARM instruction mnemonic followed by its operands.
4616bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4617                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4618  // Apply mnemonic aliases before doing anything else, as the destination
4619  // mnemonic may include suffixes and we want to handle them normally.
4620  // The generic tblgen'erated code does this later, at the start of
4621  // MatchInstructionImpl(), but that's too late for aliases that include
4622  // any sort of suffix.
4623  unsigned AvailableFeatures = getAvailableFeatures();
4624  applyMnemonicAliases(Name, AvailableFeatures);
4625
4626  // First check for the ARM-specific .req directive.
4627  if (Parser.getTok().is(AsmToken::Identifier) &&
4628      Parser.getTok().getIdentifier() == ".req") {
4629    parseDirectiveReq(Name, NameLoc);
4630    // We always return 'error' for this, as we're done with this
4631    // statement and don't need to match the instruction.
4632    return true;
4633  }
4634
4635  // Create the leading tokens for the mnemonic, split by '.' characters.
4636  size_t Start = 0, Next = Name.find('.');
4637  StringRef Mnemonic = Name.slice(Start, Next);
4638
4639  // Split out the predication code and carry setting flag from the mnemonic.
4640  unsigned PredicationCode;
4641  unsigned ProcessorIMod;
4642  bool CarrySetting;
4643  StringRef ITMask;
4644  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4645                           ProcessorIMod, ITMask);
4646
4647  // In Thumb1, only the branch (B) instruction can be predicated.
4648  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4649    Parser.EatToEndOfStatement();
4650    return Error(NameLoc, "conditional execution not supported in Thumb1");
4651  }
4652
4653  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4654
4655  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4656  // is the mask as it will be for the IT encoding if the conditional
4657  // encoding has a '1' as its bit 0 (i.e. 't' ==> '1'). In the case
4658  // where the conditional bit0 is zero, the instruction post-processing
4659  // will adjust the mask accordingly.
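  // For example, "itte" yields ITMask == "te", which the loop below turns into
  // Mask == 0b1010 ('t' -> 1, 'e' -> 0, with a trailing 1 marking the block
  // length).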
4660  if (Mnemonic == "it") {
4661    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4662    if (ITMask.size() > 3) {
4663      Parser.EatToEndOfStatement();
4664      return Error(Loc, "too many conditions on IT instruction");
4665    }
4666    unsigned Mask = 8;
4667    for (unsigned i = ITMask.size(); i != 0; --i) {
4668      char pos = ITMask[i - 1];
4669      if (pos != 't' && pos != 'e') {
4670        Parser.EatToEndOfStatement();
4671        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4672      }
4673      Mask >>= 1;
4674      if (ITMask[i - 1] == 't')
4675        Mask |= 8;
4676    }
4677    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4678  }
4679
4680  // FIXME: This is all a pretty gross hack. We should automatically handle
4681  // optional operands like this via tblgen.
4682
4683  // Next, add the CCOut and ConditionCode operands, if needed.
4684  //
4685  // For mnemonics which can ever incorporate a carry setting bit or predication
4686  // code, our matching model involves us always generating CCOut and
4687  // ConditionCode operands to match the mnemonic "as written" and then we let
4688  // the matcher deal with finding the right instruction or generating an
4689  // appropriate error.
4690  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4691  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4692
4693  // If we had a carry-set on an instruction that can't do that, issue an
4694  // error.
4695  if (!CanAcceptCarrySet && CarrySetting) {
4696    Parser.EatToEndOfStatement();
4697    return Error(NameLoc, "instruction '" + Mnemonic +
4698                 "' can not set flags, but 's' suffix specified");
4699  }
4700  // If we had a predication code on an instruction that can't do that, issue an
4701  // error.
4702  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4703    Parser.EatToEndOfStatement();
4704    return Error(NameLoc, "instruction '" + Mnemonic +
4705                 "' is not predicable, but condition code specified");
4706  }
4707
4708  // Add the carry setting operand, if necessary.
4709  if (CanAcceptCarrySet) {
4710    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4711    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4712                                               Loc));
4713  }
4714
4715  // Add the predication code operand, if necessary.
4716  if (CanAcceptPredicationCode) {
4717    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4718                                      CarrySetting);
4719    Operands.push_back(ARMOperand::CreateCondCode(
4720                         ARMCC::CondCodes(PredicationCode), Loc));
4721  }
4722
4723  // Add the processor imod operand, if necessary.
4724  if (ProcessorIMod) {
4725    Operands.push_back(ARMOperand::CreateImm(
4726          MCConstantExpr::Create(ProcessorIMod, getContext()),
4727                                 NameLoc, NameLoc));
4728  }
4729
4730  // Add the remaining tokens in the mnemonic.
4731  while (Next != StringRef::npos) {
4732    Start = Next;
4733    Next = Name.find('.', Start + 1);
4734    StringRef ExtraToken = Name.slice(Start, Next);
4735
4736    // Some NEON instructions have an optional datatype suffix that is
4737    // completely ignored. Check for that.
4738    if (isDataTypeToken(ExtraToken) &&
4739        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4740      continue;
4741
4742    if (ExtraToken != ".n") {
4743      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4744      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4745    }
4746  }
4747
4748  // Read the remaining operands.
4749  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4750    // Read the first operand.
4751    if (parseOperand(Operands, Mnemonic)) {
4752      Parser.EatToEndOfStatement();
4753      return true;
4754    }
4755
4756    while (getLexer().is(AsmToken::Comma)) {
4757      Parser.Lex();  // Eat the comma.
4758
4759      // Parse and remember the operand.
4760      if (parseOperand(Operands, Mnemonic)) {
4761        Parser.EatToEndOfStatement();
4762        return true;
4763      }
4764    }
4765  }
4766
4767  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4768    SMLoc Loc = getLexer().getLoc();
4769    Parser.EatToEndOfStatement();
4770    return Error(Loc, "unexpected token in argument list");
4771  }
4772
4773  Parser.Lex(); // Consume the EndOfStatement
4774
4775  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4776  // do and don't have a cc_out optional-def operand. With some spot-checks
4777  // of the operand list, we can figure out which variant we're trying to
4778  // parse and adjust accordingly before actually matching. We shouldn't ever
4779  // try to remove a cc_out operand that was explicitly set on the
4780  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4781  // table driven matcher doesn't fit well with the ARM instruction set.
4782  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4783    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4784    Operands.erase(Operands.begin() + 1);
4785    delete Op;
4786  }
4787
4788  // ARM mode 'blx' needs special handling, as the register operand version
4789  // is predicable, but the label operand version is not. So, we can't rely
4790  // on the Mnemonic based checking to correctly figure out when to put
4791  // a k_CondCode operand in the list. If we're trying to match the label
4792  // version, remove the k_CondCode operand here.
4793  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4794      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4795    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4796    Operands.erase(Operands.begin() + 1);
4797    delete Op;
4798  }
4799
4800  // The vector-compare-to-zero instructions have a literal token "#0" at
4801  // the end that comes to here as an immediate operand. Convert it to a
4802  // token to play nicely with the matcher.
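  // For example, in "vceq.i32 d0, d1, #0" the trailing zero is parsed as an
  // immediate operand; it is rewritten here as the literal token "#0" so the
  // alias pattern can match it.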
4803  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4804      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4805      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4806    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4807    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4808    if (CE && CE->getValue() == 0) {
4809      Operands.erase(Operands.begin() + 5);
4810      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4811      delete Op;
4812    }
4813  }
4814  // VCMP{E} does the same thing, but with a different operand count.
4815  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4816      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4817    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4818    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4819    if (CE && CE->getValue() == 0) {
4820      Operands.erase(Operands.begin() + 4);
4821      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4822      delete Op;
4823    }
4824  }
4825  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4826  // end. Convert it to a token here. Take care not to convert those
4827  // that should hit the Thumb2 encoding.
4828  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4829      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4830      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4831      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4832    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4833    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4834    if (CE && CE->getValue() == 0 &&
4835        (isThumbOne() ||
4836         // The cc_out operand matches the IT block.
4837         ((inITBlock() != CarrySetting) &&
4838         // Neither register operand is a high register.
4839         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4840          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4841      Operands.erase(Operands.begin() + 5);
4842      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4843      delete Op;
4844    }
4845  }
4846
4847  return false;
4848}
4849
4850// Validate context-sensitive operand constraints.
4851
4852// Return 'true' if the register list contains any register other than a low
4853// GPR register or HiReg, 'false' otherwise. If Reg is in the register list,
4854// set 'containsReg' to true.
4855static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4856                                 unsigned HiReg, bool &containsReg) {
4857  containsReg = false;
4858  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4859    unsigned OpReg = Inst.getOperand(i).getReg();
4860    if (OpReg == Reg)
4861      containsReg = true;
4862    // Anything other than a low register isn't legal here.
4863    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4864      return true;
4865  }
4866  return false;
4867}
4868
4869// Check if the specified register is in the register list of the inst,
4870// starting at the indicated operand number.
4871static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4872  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4873    unsigned OpReg = Inst.getOperand(i).getReg();
4874    if (OpReg == Reg)
4875      return true;
4876  }
4877  return false;
4878}
4879
4880// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4881// the ARMInsts array) instead. Getting that here requires awkward
4882// API changes, though. Better way?
4883namespace llvm {
4884extern const MCInstrDesc ARMInsts[];
4885}
4886static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4887  return ARMInsts[Opcode];
4888}
4889
4890// FIXME: We would really like to be able to tablegen'erate this.
4891bool ARMAsmParser::
4892validateInstruction(MCInst &Inst,
4893                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4894  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4895  SMLoc Loc = Operands[0]->getStartLoc();
4896  // Check the IT block state first.
4897  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4898  // being allowed in IT blocks, but not being predicable.  It just always
4899  // executes.
4900  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
4901    unsigned bit = 1;
4902    if (ITState.FirstCond)
4903      ITState.FirstCond = false;
4904    else
4905      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
4906    // The instruction must be predicable.
4907    if (!MCID.isPredicable())
4908      return Error(Loc, "instructions in IT block must be predicable");
4909    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
4910    unsigned ITCond = bit ? ITState.Cond :
4911      ARMCC::getOppositeCondition(ITState.Cond);
4912    if (Cond != ITCond) {
4913      // Find the condition code Operand to get its SMLoc information.
4914      SMLoc CondLoc;
4915      for (unsigned i = 1; i < Operands.size(); ++i)
4916        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
4917          CondLoc = Operands[i]->getStartLoc();
4918      return Error(CondLoc, "incorrect condition in IT block; got '" +
4919                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
4920                   "', but expected '" +
4921                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
4922    }
4923  // Check for non-'al' condition codes outside of the IT block.
4924  } else if (isThumbTwo() && MCID.isPredicable() &&
4925             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
4926             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
4927             Inst.getOpcode() != ARM::t2B)
4928    return Error(Loc, "predicated instructions must be in IT block");
4929
4930  switch (Inst.getOpcode()) {
4931  case ARM::LDRD:
4932  case ARM::LDRD_PRE:
4933  case ARM::LDRD_POST:
4934  case ARM::LDREXD: {
4935    // Rt2 must be Rt + 1.
4936    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4937    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4938    if (Rt2 != Rt + 1)
4939      return Error(Operands[3]->getStartLoc(),
4940                   "destination operands must be sequential");
4941    return false;
4942  }
4943  case ARM::STRD: {
4944    // Rt2 must be Rt + 1.
4945    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4946    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4947    if (Rt2 != Rt + 1)
4948      return Error(Operands[3]->getStartLoc(),
4949                   "source operands must be sequential");
4950    return false;
4951  }
4952  case ARM::STRD_PRE:
4953  case ARM::STRD_POST:
4954  case ARM::STREXD: {
4955    // Rt2 must be Rt + 1.
4956    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4957    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
4958    if (Rt2 != Rt + 1)
4959      return Error(Operands[3]->getStartLoc(),
4960                   "source operands must be sequential");
4961    return false;
4962  }
4963  case ARM::SBFX:
4964  case ARM::UBFX: {
4965    // width must be in range [1, 32-lsb]
4966    unsigned lsb = Inst.getOperand(2).getImm();
4967    unsigned widthm1 = Inst.getOperand(3).getImm();
4968    if (widthm1 >= 32 - lsb)
4969      return Error(Operands[5]->getStartLoc(),
4970                   "bitfield width must be in range [1,32-lsb]");
4971    return false;
4972  }
4973  case ARM::tLDMIA: {
4974    // If we're parsing Thumb2, the .w variant is available and handles
4975    // most cases that are normally illegal for a Thumb1 LDM
4976    // instruction. We'll make the transformation in processInstruction()
4977    // if necessary.
4978    //
4979    // Thumb LDM instructions are writeback iff the base register is not
4980    // in the register list.
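    // For example, "ldmia r0, {r0, r1}" has no writeback because the base is
    // in the list, while "ldmia r1!, {r2, r3}" must carry the '!' token in
    // Thumb1.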
4981    unsigned Rn = Inst.getOperand(0).getReg();
4982    bool hasWritebackToken =
4983      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
4984       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
4985    bool listContainsBase;
4986    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
4987      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
4988                   "registers must be in range r0-r7");
4989    // If we should have writeback, then there should be a '!' token.
4990    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
4991      return Error(Operands[2]->getStartLoc(),
4992                   "writeback operator '!' expected");
4993    // If we should not have writeback, there must not be a '!'. This is
4994    // true even for the 32-bit wide encodings.
4995    if (listContainsBase && hasWritebackToken)
4996      return Error(Operands[3]->getStartLoc(),
4997                   "writeback operator '!' not allowed when base register "
4998                   "in register list");
4999
5000    break;
5001  }
5002  case ARM::t2LDMIA_UPD: {
5003    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5004      return Error(Operands[4]->getStartLoc(),
5005                   "writeback operator '!' not allowed when base register "
5006                   "in register list");
5007    break;
5008  }
5009  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5010  // so only issue a diagnostic for thumb1. The instructions will be
5011  // switched to the t2 encodings in processInstruction() if necessary.
5012  case ARM::tPOP: {
5013    bool listContainsBase;
5014    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5015        !isThumbTwo())
5016      return Error(Operands[2]->getStartLoc(),
5017                   "registers must be in range r0-r7 or pc");
5018    break;
5019  }
5020  case ARM::tPUSH: {
5021    bool listContainsBase;
5022    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5023        !isThumbTwo())
5024      return Error(Operands[2]->getStartLoc(),
5025                   "registers must be in range r0-r7 or lr");
5026    break;
5027  }
5028  case ARM::tSTMIA_UPD: {
5029    bool listContainsBase;
5030    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5031      return Error(Operands[4]->getStartLoc(),
5032                   "registers must be in range r0-r7");
5033    break;
5034  }
5035  }
5036
5037  return false;
5038}
5039
5040static unsigned getRealVSTLNOpcode(unsigned Opc) {
5041  switch(Opc) {
5042  default: llvm_unreachable("unexpected opcode!");
5043  // VST1LN
5044  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
5045  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5046  case ARM::VST1LNdWB_fixed_Asm_U8:
5047    return ARM::VST1LNd8_UPD;
5048  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
5049  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
5050  case ARM::VST1LNdWB_fixed_Asm_U16:
5051    return ARM::VST1LNd16_UPD;
5052  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
5053  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5054  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
5055    return ARM::VST1LNd32_UPD;
5056  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
5057  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5058  case ARM::VST1LNdWB_register_Asm_U8:
5059    return ARM::VST1LNd8_UPD;
5060  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
5061  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
5062  case ARM::VST1LNdWB_register_Asm_U16:
5063    return ARM::VST1LNd16_UPD;
5064  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
5065  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5066  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
5067    return ARM::VST1LNd32_UPD;
5068  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
5069  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
5070  case ARM::VST1LNdAsm_U8:
5071    return ARM::VST1LNd8;
5072  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
5073  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5074  case ARM::VST1LNdAsm_U16:
5075    return ARM::VST1LNd16;
5076  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
5077  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
5078  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
5079    return ARM::VST1LNd32;
5080
5081  // VST2LN
5082  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
5083  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5084  case ARM::VST2LNdWB_fixed_Asm_U8:
5085    return ARM::VST2LNd8_UPD;
5086  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
5087  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
5088  case ARM::VST2LNdWB_fixed_Asm_U16:
5089    return ARM::VST2LNd16_UPD;
5090  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5091  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5092  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5093    return ARM::VST2LNd32_UPD;
5094  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
5095  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5096  case ARM::VST2LNdWB_register_Asm_U8:
5097    return ARM::VST2LNd8_UPD;
5098  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
5099  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
5100  case ARM::VST2LNdWB_register_Asm_U16:
5101    return ARM::VST2LNd16_UPD;
5102  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5103  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5104  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5105    return ARM::VST2LNd32_UPD;
5106  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
5107  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
5108  case ARM::VST2LNdAsm_U8:
5109    return ARM::VST2LNd8;
5110  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
5111  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5112  case ARM::VST2LNdAsm_U16:
5113    return ARM::VST2LNd16;
5114  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5115  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
5116  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
5117    return ARM::VST2LNd32;
5118  }
5119}
5120
5121static unsigned getRealVLDLNOpcode(unsigned Opc) {
5122  switch(Opc) {
5123  default: llvm_unreachable("unexpected opcode!");
5124  // VLD1LN
5125  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5126  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5127  case ARM::VLD1LNdWB_fixed_Asm_U8:
5128    return ARM::VLD1LNd8_UPD;
5129  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5130  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5131  case ARM::VLD1LNdWB_fixed_Asm_U16:
5132    return ARM::VLD1LNd16_UPD;
5133  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5134  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5135  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5136    return ARM::VLD1LNd32_UPD;
5137  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5138  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5139  case ARM::VLD1LNdWB_register_Asm_U8:
5140    return ARM::VLD1LNd8_UPD;
5141  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5142  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5143  case ARM::VLD1LNdWB_register_Asm_U16:
5144    return ARM::VLD1LNd16_UPD;
5145  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5146  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5147  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5148    return ARM::VLD1LNd32_UPD;
5149  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5150  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5151  case ARM::VLD1LNdAsm_U8:
5152    return ARM::VLD1LNd8;
5153  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5154  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5155  case ARM::VLD1LNdAsm_U16:
5156    return ARM::VLD1LNd16;
5157  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5158  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5159  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5160    return ARM::VLD1LNd32;
5161
5162  // VLD2LN
5163  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5164  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5165  case ARM::VLD2LNdWB_fixed_Asm_U8:
5166    return ARM::VLD2LNd8_UPD;
5167  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5168  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5169  case ARM::VLD2LNdWB_fixed_Asm_U16:
5170    return ARM::VLD2LNd16_UPD;
5171  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5172  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5173  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5174    return ARM::VLD2LNd32_UPD;
5175  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5176  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5177  case ARM::VLD2LNdWB_register_Asm_U8:
5178    return ARM::VLD2LNd8_UPD;
5179  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5180  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5181  case ARM::VLD2LNdWB_register_Asm_U16:
5182    return ARM::VLD2LNd16_UPD;
5183  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5184  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5185  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5186    return ARM::VLD2LNd32_UPD;
5187  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5188  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5189  case ARM::VLD2LNdAsm_U8:
5190    return ARM::VLD2LNd8;
5191  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5192  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5193  case ARM::VLD2LNdAsm_U16:
5194    return ARM::VLD2LNd16;
5195  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5196  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5197  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5198    return ARM::VLD2LNd32;
5199  }
5200}
5201
5202bool ARMAsmParser::
5203processInstruction(MCInst &Inst,
5204                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5205  switch (Inst.getOpcode()) {
5206  // Handle NEON VST complex aliases.
5207  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5208  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5209  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5210  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5211  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5212  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5213  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5214  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5215    MCInst TmpInst;
5216    // Shuffle the operands around so the lane index operand is in the
5217    // right place.
5218    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5219    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5220    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5221    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5222    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5223    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5224    TmpInst.addOperand(Inst.getOperand(1)); // lane
5225    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5226    TmpInst.addOperand(Inst.getOperand(6));
5227    Inst = TmpInst;
5228    return true;
5229  }
5230
5231  case ARM::VST2LNdWB_register_Asm_8: case ARM::VST2LNdWB_register_Asm_P8:
5232  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5233  case ARM::VST2LNdWB_register_Asm_U8: case ARM::VST2LNdWB_register_Asm_16:
5234  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5235  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5236  case ARM::VST2LNdWB_register_Asm_32: case ARM::VST2LNdWB_register_Asm_F:
5237  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5238  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32: {
5239    MCInst TmpInst;
5240    // Shuffle the operands around so the lane index operand is in the
5241    // right place.
5242    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5243    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5244    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5245    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5246    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5247    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5248    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5249    TmpInst.addOperand(Inst.getOperand(1)); // lane
5250    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5251    TmpInst.addOperand(Inst.getOperand(6));
5252    Inst = TmpInst;
5253    return true;
5254  }
5255  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5256  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5257  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5258  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5259  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5260  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5261  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5262  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5263    MCInst TmpInst;
5264    // Shuffle the operands around so the lane index operand is in the
5265    // right place.
5266    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5267    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5268    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5269    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5270    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5271    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5272    TmpInst.addOperand(Inst.getOperand(1)); // lane
5273    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5274    TmpInst.addOperand(Inst.getOperand(5));
5275    Inst = TmpInst;
5276    return true;
5277  }
5278
5279  case ARM::VST2LNdWB_fixed_Asm_8: case ARM::VST2LNdWB_fixed_Asm_P8:
5280  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5281  case ARM::VST2LNdWB_fixed_Asm_U8: case ARM::VST2LNdWB_fixed_Asm_16:
5282  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5283  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5284  case ARM::VST2LNdWB_fixed_Asm_32: case ARM::VST2LNdWB_fixed_Asm_F:
5285  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5286  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32: {
5287    MCInst TmpInst;
5288    // Shuffle the operands around so the lane index operand is in the
5289    // right place.
5290    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5291    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5292    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5293    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5294    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5295    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5296    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5297    TmpInst.addOperand(Inst.getOperand(1)); // lane
5298    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5299    TmpInst.addOperand(Inst.getOperand(5));
5300    Inst = TmpInst;
5301    return true;
5302  }
5303  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5304  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5305  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5306  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5307  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5308  case ARM::VST1LNdAsm_U32: {
5309    MCInst TmpInst;
5310    // Shuffle the operands around so the lane index operand is in the
5311    // right place.
5312    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5313    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5314    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5315    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5316    TmpInst.addOperand(Inst.getOperand(1)); // lane
5317    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5318    TmpInst.addOperand(Inst.getOperand(5));
5319    Inst = TmpInst;
5320    return true;
5321  }
5322
5323  case ARM::VST2LNdAsm_8: case ARM::VST2LNdAsm_P8: case ARM::VST2LNdAsm_I8:
5324  case ARM::VST2LNdAsm_S8: case ARM::VST2LNdAsm_U8: case ARM::VST2LNdAsm_16:
5325  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5326  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32: case ARM::VST2LNdAsm_F:
5327  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5328  case ARM::VST2LNdAsm_U32: {
5329    MCInst TmpInst;
5330    // Shuffle the operands around so the lane index operand is in the
5331    // right place.
5332    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5333    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5334    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5335    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5336    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5337    TmpInst.addOperand(Inst.getOperand(1)); // lane
5338    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5339    TmpInst.addOperand(Inst.getOperand(5));
5340    Inst = TmpInst;
5341    return true;
5342  }
5343  // Handle NEON VLD complex aliases.
5344  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5345  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5346  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5347  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5348  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5349  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5350  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5351  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5352    MCInst TmpInst;
5353    // Shuffle the operands around so the lane index operand is in the
5354    // right place.
5355    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5356    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5357    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5358    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5359    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5360    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5361    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5362    TmpInst.addOperand(Inst.getOperand(1)); // lane
5363    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5364    TmpInst.addOperand(Inst.getOperand(6));
5365    Inst = TmpInst;
5366    return true;
5367  }
5368
5369  case ARM::VLD2LNdWB_register_Asm_8: case ARM::VLD2LNdWB_register_Asm_P8:
5370  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5371  case ARM::VLD2LNdWB_register_Asm_U8: case ARM::VLD2LNdWB_register_Asm_16:
5372  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5373  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5374  case ARM::VLD2LNdWB_register_Asm_32: case ARM::VLD2LNdWB_register_Asm_F:
5375  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5376  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32: {
5377    MCInst TmpInst;
5378    // Shuffle the operands around so the lane index operand is in the
5379    // right place.
5380    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5381    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5382    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5383    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5384    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5385    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5386    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5387    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5388    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5389    TmpInst.addOperand(Inst.getOperand(1)); // lane
5390    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5391    TmpInst.addOperand(Inst.getOperand(6));
5392    Inst = TmpInst;
5393    return true;
5394  }
5395
5396  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5397  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5398  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5399  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5400  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5401  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5402  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5403  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5404    MCInst TmpInst;
5405    // Shuffle the operands around so the lane index operand is in the
5406    // right place.
5407    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5408    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5409    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5410    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5411    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5412    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5413    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5414    TmpInst.addOperand(Inst.getOperand(1)); // lane
5415    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5416    TmpInst.addOperand(Inst.getOperand(5));
5417    Inst = TmpInst;
5418    return true;
5419  }
5420
5421  case ARM::VLD2LNdWB_fixed_Asm_8: case ARM::VLD2LNdWB_fixed_Asm_P8:
5422  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5423  case ARM::VLD2LNdWB_fixed_Asm_U8: case ARM::VLD2LNdWB_fixed_Asm_16:
5424  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5425  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5426  case ARM::VLD2LNdWB_fixed_Asm_32: case ARM::VLD2LNdWB_fixed_Asm_F:
5427  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5428  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32: {
5429    MCInst TmpInst;
5430    // Shuffle the operands around so the lane index operand is in the
5431    // right place.
5432    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5433    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5434    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5435    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5436    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5437    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5438    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5439    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5440    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5441    TmpInst.addOperand(Inst.getOperand(1)); // lane
5442    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5443    TmpInst.addOperand(Inst.getOperand(5));
5444    Inst = TmpInst;
5445    return true;
5446  }
5447
5448  case ARM::VLD1LNdAsm_8: case ARM::VLD1LNdAsm_P8: case ARM::VLD1LNdAsm_I8:
5449  case ARM::VLD1LNdAsm_S8: case ARM::VLD1LNdAsm_U8: case ARM::VLD1LNdAsm_16:
5450  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5451  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32: case ARM::VLD1LNdAsm_F:
5452  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5453  case ARM::VLD1LNdAsm_U32: {
5454    MCInst TmpInst;
5455    // Shuffle the operands around so the lane index operand is in the
5456    // right place.
5457    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5458    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5459    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5460    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5461    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5462    TmpInst.addOperand(Inst.getOperand(1)); // lane
5463    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5464    TmpInst.addOperand(Inst.getOperand(5));
5465    Inst = TmpInst;
5466    return true;
5467  }
5468
5469  case ARM::VLD2LNdAsm_8: case ARM::VLD2LNdAsm_P8: case ARM::VLD2LNdAsm_I8:
5470  case ARM::VLD2LNdAsm_S8: case ARM::VLD2LNdAsm_U8: case ARM::VLD2LNdAsm_16:
5471  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5472  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32: case ARM::VLD2LNdAsm_F:
5473  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5474  case ARM::VLD2LNdAsm_U32: {
5475    MCInst TmpInst;
5476    // Shuffle the operands around so the lane index operand is in the
5477    // right place.
5478    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5479    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5480    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5481    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5482    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5483    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5484    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5485    TmpInst.addOperand(Inst.getOperand(1)); // lane
5486    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5487    TmpInst.addOperand(Inst.getOperand(5));
5488    Inst = TmpInst;
5489    return true;
5490  }
5491  // Handle the Thumb2 mode MOV complex aliases.
5492  case ARM::t2MOVsi:
5493  case ARM::t2MOVSsi: {
5494    // Which instruction to expand to depends on the CCOut operand and,
5495    // when the register operands are low registers, on whether we're
5496    // inside an IT block.
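    // For example, "movs r0, r1, lsr #3" outside an IT block can use the
    // narrow tLSRri encoding, while "mov r0, r1, ror #8" is always wide
    // (t2RORri).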
5497    bool isNarrow = false;
5498    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5499        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5500        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5501      isNarrow = true;
5502    MCInst TmpInst;
5503    unsigned newOpc;
5504    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5505    default: llvm_unreachable("unexpected opcode!");
5506    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5507    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5508    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5509    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5510    }
5511    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5512    if (Amount == 32) Amount = 0;
5513    TmpInst.setOpcode(newOpc);
5514    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5515    if (isNarrow)
5516      TmpInst.addOperand(MCOperand::CreateReg(
5517          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5518    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5519    TmpInst.addOperand(MCOperand::CreateImm(Amount));
5520    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5521    TmpInst.addOperand(Inst.getOperand(4));
5522    if (!isNarrow)
5523      TmpInst.addOperand(MCOperand::CreateReg(
5524          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5525    Inst = TmpInst;
5526    return true;
5527  }
5528  // Handle the ARM mode MOV complex aliases.
5529  case ARM::ASRr:
5530  case ARM::LSRr:
5531  case ARM::LSLr:
5532  case ARM::RORr: {
5533    ARM_AM::ShiftOpc ShiftTy;
5534    switch(Inst.getOpcode()) {
5535    default: llvm_unreachable("unexpected opcode!");
5536    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5537    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5538    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5539    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5540    }
5541    // The shift amount comes from the register operand, not an immediate.
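    // e.g., "asr r0, r1, r2" is encoded as "mov r0, r1, asr r2" (MOVsr).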
5542    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5543    MCInst TmpInst;
5544    TmpInst.setOpcode(ARM::MOVsr);
5545    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5546    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5547    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5548    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5549    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5550    TmpInst.addOperand(Inst.getOperand(4));
5551    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5552    Inst = TmpInst;
5553    return true;
5554  }
5555  case ARM::ASRi:
5556  case ARM::LSRi:
5557  case ARM::LSLi:
5558  case ARM::RORi: {
5559    ARM_AM::ShiftOpc ShiftTy;
5560    switch(Inst.getOpcode()) {
5561    default: llvm_unreachable("unexpected opcode!");
5562    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5563    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5564    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5565    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5566    }
5567    // A shift by zero is a plain MOVr, not a MOVsi.
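    // e.g., "lsl r0, r1, #0" becomes "mov r0, r1", while "lsr r0, r1, #4"
    // becomes "mov r0, r1, lsr #4" (MOVsi).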
5568    unsigned Amt = Inst.getOperand(2).getImm();
5569    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5570    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5571    MCInst TmpInst;
5572    TmpInst.setOpcode(Opc);
5573    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5574    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5575    if (Opc == ARM::MOVsi)
5576      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5577    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5578    TmpInst.addOperand(Inst.getOperand(4));
5579    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5580    Inst = TmpInst;
5581    return true;
5582  }
5583  case ARM::RRXi: {
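    // "rrx Rd, Rm" is an alias for "mov Rd, Rm, rrx", so expand to MOVsi with
    // an RRX shifter operand.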
5584    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5585    MCInst TmpInst;
5586    TmpInst.setOpcode(ARM::MOVsi);
5587    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5588    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5589    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5590    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5591    TmpInst.addOperand(Inst.getOperand(3));
5592    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5593    Inst = TmpInst;
5594    return true;
5595  }
5596  case ARM::t2LDMIA_UPD: {
5597    // If this is a load of a single register, then we should use
5598    // a post-indexed LDR instruction instead, per the ARM ARM.
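    // e.g., "ldmia.w r3!, {r4}" is emitted as "ldr.w r4, [r3], #4".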
5599    if (Inst.getNumOperands() != 5)
5600      return false;
5601    MCInst TmpInst;
5602    TmpInst.setOpcode(ARM::t2LDR_POST);
5603    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5604    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5605    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5606    TmpInst.addOperand(MCOperand::CreateImm(4));
5607    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5608    TmpInst.addOperand(Inst.getOperand(3));
5609    Inst = TmpInst;
5610    return true;
5611  }
5612  case ARM::t2STMDB_UPD: {
5613    // If this is a store of a single register, then we should use
5614    // a pre-indexed STR instruction instead, per the ARM ARM.
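    // e.g., "stmdb.w r3!, {r4}" is emitted as "str.w r4, [r3, #-4]!".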
5615    if (Inst.getNumOperands() != 5)
5616      return false;
5617    MCInst TmpInst;
5618    TmpInst.setOpcode(ARM::t2STR_PRE);
5619    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5620    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5621    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5622    TmpInst.addOperand(MCOperand::CreateImm(-4));
5623    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5624    TmpInst.addOperand(Inst.getOperand(3));
5625    Inst = TmpInst;
5626    return true;
5627  }
5628  case ARM::LDMIA_UPD:
5629    // If this is a load of a single register via a 'pop', then we should use
5630    // a post-indexed LDR instruction instead, per the ARM ARM.
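    // e.g., "pop {r3}" is emitted as "ldr r3, [sp], #4".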
5631    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5632        Inst.getNumOperands() == 5) {
5633      MCInst TmpInst;
5634      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5635      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5636      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5637      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5638      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5639      TmpInst.addOperand(MCOperand::CreateImm(4));
5640      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5641      TmpInst.addOperand(Inst.getOperand(3));
5642      Inst = TmpInst;
5643      return true;
5644    }
5645    break;
5646  case ARM::STMDB_UPD:
5647    // If this is a store of a single register via a 'push', then we should use
5648    // a pre-indexed STR instruction instead, per the ARM ARM.
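    // e.g., "push {r3}" is emitted as "str r3, [sp, #-4]!".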
5649    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5650        Inst.getNumOperands() == 5) {
5651      MCInst TmpInst;
5652      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5653      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5654      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5655      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5656      TmpInst.addOperand(MCOperand::CreateImm(-4));
5657      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5658      TmpInst.addOperand(Inst.getOperand(3));
5659      Inst = TmpInst;
5660    }
5661    break;
5662  case ARM::t2ADDri12:
5663    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5664    // mnemonic was used (not "addw"), encoding T3 is preferred.
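    // e.g., an immediate of 255 fits T3, so plain "add" is re-encoded as
    // t2ADDri; 4095 only fits T4, so it stays t2ADDri12, as does anything
    // spelled "addw".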
5665    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5666        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5667      break;
5668    Inst.setOpcode(ARM::t2ADDri);
5669    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5670    break;
5671  case ARM::t2SUBri12:
5672    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5673    // mnemonic was used (not "subw"), encoding T3 is preferred.
5674    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5675        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5676      break;
5677    Inst.setOpcode(ARM::t2SUBri);
5678    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5679    break;
5680  case ARM::tADDi8:
5681    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5682    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5683    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5684    // to encoding T1 if <Rd> is omitted."
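    // e.g., "adds r2, r2, #5" (Rd written explicitly) uses tADDi3, while
    // "adds r2, #5" stays tADDi8.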
5685    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5686      Inst.setOpcode(ARM::tADDi3);
5687      return true;
5688    }
5689    break;
5690  case ARM::tSUBi8:
5691    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5692    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5693    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5694    // to encoding T1 if <Rd> is omitted."
5695    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5696      Inst.setOpcode(ARM::tSUBi3);
5697      return true;
5698    }
5699    break;
5700  case ARM::t2ADDrr: {
5701    // If the destination and first source operand are the same, and
5702    // there's no setting of the flags, use encoding T2 instead of T3.
5703    // Note that this is only for ADD, not SUB. This mirrors the system
5704    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
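    // e.g., "add r1, r1, r8" (Rd == Rn, no flags, no ".w") narrows to
    // tADDhirr.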
5705    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5706        Inst.getOperand(5).getReg() != 0 ||
5707        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5708         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5709      break;
5710    MCInst TmpInst;
5711    TmpInst.setOpcode(ARM::tADDhirr);
5712    TmpInst.addOperand(Inst.getOperand(0));
5713    TmpInst.addOperand(Inst.getOperand(0));
5714    TmpInst.addOperand(Inst.getOperand(2));
5715    TmpInst.addOperand(Inst.getOperand(3));
5716    TmpInst.addOperand(Inst.getOperand(4));
5717    Inst = TmpInst;
5718    return true;
5719  }
5720  case ARM::tB:
5721    // A Thumb conditional branch outside of an IT block is a tBcc.
5722    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5723      Inst.setOpcode(ARM::tBcc);
5724      return true;
5725    }
5726    break;
5727  case ARM::t2B:
5728    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5729    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
5730      Inst.setOpcode(ARM::t2Bcc);
5731      return true;
5732    }
5733    break;
5734  case ARM::t2Bcc:
5735    // If the conditional is AL or we're in an IT block, we really want t2B.
5736    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5737      Inst.setOpcode(ARM::t2B);
5738      return true;
5739    }
5740    break;
5741  case ARM::tBcc:
5742    // If the conditional is AL, we really want tB.
5743    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5744      Inst.setOpcode(ARM::tB);
5745      return true;
5746    }
5747    break;
5748  case ARM::tLDMIA: {
5749    // If the register list contains any high registers, or if the writeback
5750    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
5751    // instead if we're in Thumb2. Otherwise, this should have generated
5752    // an error in validateInstruction().
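    // e.g., "ldmia r0, {r1, r2}" (no writeback, base not in the list) or any
    // list containing a high register must use the 32-bit form.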
5753    unsigned Rn = Inst.getOperand(0).getReg();
5754    bool hasWritebackToken =
5755      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5756       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5757    bool listContainsBase;
5758    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
5759        (!listContainsBase && !hasWritebackToken) ||
5760        (listContainsBase && hasWritebackToken)) {
5761      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5762      assert (isThumbTwo());
5763      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
5764      // If we're switching to the updating version, we need to insert
5765      // the writeback tied operand.
5766      if (hasWritebackToken)
5767        Inst.insert(Inst.begin(),
5768                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
5769      return true;
5770    }
5771    break;
5772  }
5773  case ARM::tSTMIA_UPD: {
5774    // If the register list contains any high registers, we need to use
5775    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5776    // should have generated an error in validateInstruction().
5777    unsigned Rn = Inst.getOperand(0).getReg();
5778    bool listContainsBase;
5779    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
5780      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5781      assert (isThumbTwo());
5782      Inst.setOpcode(ARM::t2STMIA_UPD);
5783      return true;
5784    }
5785    break;
5786  }
5787  case ARM::tPOP: {
5788    bool listContainsBase;
5789    // If the register list contains any high registers, we need to use
5790    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5791    // should have generated an error in validateInstruction().
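    // e.g., "pop {r4, r8}" becomes "ldmia sp!, {r4, r8}" (t2LDMIA_UPD).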
5792    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
5793      return false;
5794    assert (isThumbTwo());
5795    Inst.setOpcode(ARM::t2LDMIA_UPD);
5796    // Add the base register and writeback operands.
5797    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5798    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5799    return true;
5800  }
5801  case ARM::tPUSH: {
5802    bool listContainsBase;
5803    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
5804      return false;
5805    assert (isThumbTwo());
5806    Inst.setOpcode(ARM::t2STMDB_UPD);
5807    // Add the base register and writeback operands.
5808    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5809    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5810    return true;
5811  }
5812  case ARM::t2MOVi: {
5813    // If we can use the 16-bit encoding and the user didn't explicitly
5814    // request the 32-bit variant, transform it here.
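    // e.g., "movs r0, #42" outside an IT block can use the 16-bit tMOVi8
    // encoding, as can the predicated, non-flag-setting form inside one.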
5815    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5816        Inst.getOperand(1).getImm() <= 255 &&
5817        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
5818         Inst.getOperand(4).getReg() == ARM::CPSR) ||
5819        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
5820        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5821         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5822      // The operands aren't in the same order for tMOVi8...
5823      MCInst TmpInst;
5824      TmpInst.setOpcode(ARM::tMOVi8);
5825      TmpInst.addOperand(Inst.getOperand(0));
5826      TmpInst.addOperand(Inst.getOperand(4));
5827      TmpInst.addOperand(Inst.getOperand(1));
5828      TmpInst.addOperand(Inst.getOperand(2));
5829      TmpInst.addOperand(Inst.getOperand(3));
5830      Inst = TmpInst;
5831      return true;
5832    }
5833    break;
5834  }
5835  case ARM::t2MOVr: {
5836    // If we can use the 16-bit encoding and the user didn't explicitly
5837    // request the 32-bit variant, transform it here.
5838    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5839        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5840        Inst.getOperand(2).getImm() == ARMCC::AL &&
5841        Inst.getOperand(4).getReg() == ARM::CPSR &&
5842        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5843         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5844      // The operands aren't the same for tMOV[S]r... (no cc_out)
5845      MCInst TmpInst;
5846      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
5847      TmpInst.addOperand(Inst.getOperand(0));
5848      TmpInst.addOperand(Inst.getOperand(1));
5849      TmpInst.addOperand(Inst.getOperand(2));
5850      TmpInst.addOperand(Inst.getOperand(3));
5851      Inst = TmpInst;
5852      return true;
5853    }
5854    break;
5855  }
5856  case ARM::t2SXTH:
5857  case ARM::t2SXTB:
5858  case ARM::t2UXTH:
5859  case ARM::t2UXTB: {
5860    // If we can use the 16-bit encoding and the user didn't explicitly
5861    // request the 32-bit variant, transform it here.
5862    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5863        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5864        Inst.getOperand(2).getImm() == 0 &&
5865        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5866         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5867      unsigned NewOpc;
5868      switch (Inst.getOpcode()) {
5869      default: llvm_unreachable("Illegal opcode!");
5870      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
5871      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
5872      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
5873      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
5874      }
5875      // The operands aren't the same for thumb1 (no rotate operand).
5876      MCInst TmpInst;
5877      TmpInst.setOpcode(NewOpc);
5878      TmpInst.addOperand(Inst.getOperand(0));
5879      TmpInst.addOperand(Inst.getOperand(1));
5880      TmpInst.addOperand(Inst.getOperand(3));
5881      TmpInst.addOperand(Inst.getOperand(4));
5882      Inst = TmpInst;
5883      return true;
5884    }
5885    break;
5886  }
5887  case ARM::t2IT: {
5888    // In the encoding, the mask bits for all but the first condition use
5889    // the low bit of the first condition to mean 't': a mask bit equal to
5890    // that low bit is 't', its inverse is 'e'. The parsed mask always uses
5891    // 1 for 't', so XOR-toggle the bits if the low bit of the condition
5892    // code is zero. The encoding also expects the low bit of the condition
5893    // to appear as bit 4 of the mask operand, so mask that in if needed.
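    // For example, "ite eq" is parsed with a mask of 0b0100; since EQ has a
    // zero low bit, bit 3 is toggled to give the encoded mask 0b1100. For
    // "itt ne" (low bit one) the mask is left as-is and 0x10 is ORed in.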
5894    MCOperand &MO = Inst.getOperand(1);
5895    unsigned Mask = MO.getImm();
5896    unsigned OrigMask = Mask;
5897    unsigned TZ = CountTrailingZeros_32(Mask);
5898    if ((Inst.getOperand(0).getImm() & 1) == 0) {
5899      assert(Mask && TZ <= 3 && "illegal IT mask value!");
5900      for (unsigned i = 3; i != TZ; --i)
5901        Mask ^= 1 << i;
5902    } else
5903      Mask |= 0x10;
5904    MO.setImm(Mask);
5905
5906    // Set up the IT block state according to the IT instruction we just
5907    // matched.
5908    assert(!inITBlock() && "nested IT blocks?!");
5909    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
5910    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
5911    ITState.CurPosition = 0;
5912    ITState.FirstCond = true;
5913    break;
5914  }
5915  }
5916  return false;
5917}
5918
5919unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
5920  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
5921  // suffix depending on whether they're in an IT block or not.
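  // e.g., "ands r2, r3" is the valid 16-bit form outside an IT block, while
  // inside an IT block it must be written "and r2, r3".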
5922  unsigned Opc = Inst.getOpcode();
5923  const MCInstrDesc &MCID = getInstDesc(Opc);
5924  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
5925    assert(MCID.hasOptionalDef() &&
5926           "optionally flag setting instruction missing optional def operand");
5927    assert(MCID.NumOperands == Inst.getNumOperands() &&
5928           "operand count mismatch!");
5929    // Find the optional-def operand (cc_out).
5930    unsigned OpNo;
5931    for (OpNo = 0;
5932         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
5933         ++OpNo)
5934      ;
5935    // If we're parsing Thumb1, reject it completely.
5936    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
5937      return Match_MnemonicFail;
5938    // If we're parsing Thumb2, which form is legal depends on whether we're
5939    // in an IT block.
5940    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
5941        !inITBlock())
5942      return Match_RequiresITBlock;
5943    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
5944        inITBlock())
5945      return Match_RequiresNotITBlock;
5946  }
5947  // Some Thumb1 encodings that support high registers (e.g., tADDhirr) only
5948  // allow both registers to be from r0-r7 when assembling for Thumb2.
5949  else if (Opc == ARM::tADDhirr && isThumbOne() &&
5950           isARMLowRegister(Inst.getOperand(1).getReg()) &&
5951           isARMLowRegister(Inst.getOperand(2).getReg()))
5952    return Match_RequiresThumb2;
5953  // Others only require ARMv6 or later.
5954  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
5955           isARMLowRegister(Inst.getOperand(0).getReg()) &&
5956           isARMLowRegister(Inst.getOperand(1).getReg()))
5957    return Match_RequiresV6;
5958  return Match_Success;
5959}
5960
5961bool ARMAsmParser::
5962MatchAndEmitInstruction(SMLoc IDLoc,
5963                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
5964                        MCStreamer &Out) {
5965  MCInst Inst;
5966  unsigned ErrorInfo;
5967  unsigned MatchResult;
5968  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
5969  switch (MatchResult) {
5970  default: break;
5971  case Match_Success:
5972    // Context sensitive operand constraints aren't handled by the matcher,
5973    // so check them here.
5974    if (validateInstruction(Inst, Operands)) {
5975      // Still progress the IT block, otherwise one wrong condition causes
5976      // nasty cascading errors.
5977      forwardITPosition();
5978      return true;
5979    }
5980
5981    // Some instructions need post-processing to, for example, tweak which
5982    // encoding is selected. Loop on it while changes happen so the
5983    // individual transformations can chain off each other. E.g.,
5984    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
5985    while (processInstruction(Inst, Operands))
5986      ;
5987
5988    // Only move forward at the very end so that everything in validate
5989    // and process gets a consistent answer about whether we're in an IT
5990    // block.
5991    forwardITPosition();
5992
5993    Out.EmitInstruction(Inst);
5994    return false;
5995  case Match_MissingFeature:
5996    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
5997    return true;
5998  case Match_InvalidOperand: {
5999    SMLoc ErrorLoc = IDLoc;
6000    if (ErrorInfo != ~0U) {
6001      if (ErrorInfo >= Operands.size())
6002        return Error(IDLoc, "too few operands for instruction");
6003
6004      ErrorLoc = static_cast<ARMOperand*>(Operands[ErrorInfo])->getStartLoc();
6005      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6006    }
6007
6008    return Error(ErrorLoc, "invalid operand for instruction");
6009  }
6010  case Match_MnemonicFail:
6011    return Error(IDLoc, "invalid instruction");
6012  case Match_ConversionFail:
6013    // The converter function will have already emitted a diagnostic.
6014    return true;
6015  case Match_RequiresNotITBlock:
6016    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6017  case Match_RequiresITBlock:
6018    return Error(IDLoc, "instruction only valid inside IT block");
6019  case Match_RequiresV6:
6020    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6021  case Match_RequiresThumb2:
6022    return Error(IDLoc, "instruction variant requires Thumb2");
6023  }
6024
6025  llvm_unreachable("Implement any new match types added!");
6026  return true;
6027}
6028
6029/// ParseDirective parses the ARM-specific directives.
6030bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6031  StringRef IDVal = DirectiveID.getIdentifier();
6032  if (IDVal == ".word")
6033    return parseDirectiveWord(4, DirectiveID.getLoc());
6034  else if (IDVal == ".thumb")
6035    return parseDirectiveThumb(DirectiveID.getLoc());
6036  else if (IDVal == ".arm")
6037    return parseDirectiveARM(DirectiveID.getLoc());
6038  else if (IDVal == ".thumb_func")
6039    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6040  else if (IDVal == ".code")
6041    return parseDirectiveCode(DirectiveID.getLoc());
6042  else if (IDVal == ".syntax")
6043    return parseDirectiveSyntax(DirectiveID.getLoc());
6044  else if (IDVal == ".unreq")
6045    return parseDirectiveUnreq(DirectiveID.getLoc());
6046  return true;
6047}
6048
6049/// parseDirectiveWord
6050///  ::= .word [ expression (, expression)* ]
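///  e.g. ".word 0, label+4" emits two 4-byte values.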
6051bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6052  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6053    for (;;) {
6054      const MCExpr *Value;
6055      if (getParser().ParseExpression(Value))
6056        return true;
6057
6058      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6059
6060      if (getLexer().is(AsmToken::EndOfStatement))
6061        break;
6062
6063      // FIXME: Improve diagnostic.
6064      if (getLexer().isNot(AsmToken::Comma))
6065        return Error(L, "unexpected token in directive");
6066      Parser.Lex();
6067    }
6068  }
6069
6070  Parser.Lex();
6071  return false;
6072}
6073
6074/// parseDirectiveThumb
6075///  ::= .thumb
6076bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6077  if (getLexer().isNot(AsmToken::EndOfStatement))
6078    return Error(L, "unexpected token in directive");
6079  Parser.Lex();
6080
6081  if (!isThumb())
6082    SwitchMode();
6083  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6084  return false;
6085}
6086
6087/// parseDirectiveARM
6088///  ::= .arm
6089bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6090  if (getLexer().isNot(AsmToken::EndOfStatement))
6091    return Error(L, "unexpected token in directive");
6092  Parser.Lex();
6093
6094  if (isThumb())
6095    SwitchMode();
6096  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6097  return false;
6098}
6099
6100/// parseDirectiveThumbFunc
6101///  ::= .thumb_func symbol_name
6102bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6103  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6104  bool isMachO = MAI.hasSubsectionsViaSymbols();
6105  StringRef Name;
6106
6107  // Darwin asm has the function name after the .thumb_func directive;
6108  // ELF doesn't.
6109  if (isMachO) {
6110    const AsmToken &Tok = Parser.getTok();
6111    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6112      return Error(L, "unexpected token in .thumb_func directive");
6113    Name = Tok.getIdentifier();
6114    Parser.Lex(); // Consume the identifier token.
6115  }
6116
6117  if (getLexer().isNot(AsmToken::EndOfStatement))
6118    return Error(L, "unexpected token in directive");
6119  Parser.Lex();
6120
6121  // FIXME: assuming the function name is on the line following .thumb_func
6122  if (!isMachO) {
6123    Name = Parser.getTok().getIdentifier();
6124  }
6125
6126  // Mark symbol as a thumb symbol.
6127  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6128  getParser().getStreamer().EmitThumbFunc(Func);
6129  return false;
6130}
6131
6132/// parseDirectiveSyntax
6133///  ::= .syntax unified | divided
6134bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6135  const AsmToken &Tok = Parser.getTok();
6136  if (Tok.isNot(AsmToken::Identifier))
6137    return Error(L, "unexpected token in .syntax directive");
6138  StringRef Mode = Tok.getString();
6139  if (Mode == "unified" || Mode == "UNIFIED")
6140    Parser.Lex();
6141  else if (Mode == "divided" || Mode == "DIVIDED")
6142    return Error(L, "'.syntax divided' arm assembly not supported");
6143  else
6144    return Error(L, "unrecognized syntax mode in .syntax directive");
6145
6146  if (getLexer().isNot(AsmToken::EndOfStatement))
6147    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6148  Parser.Lex();
6149
6150  // TODO tell the MC streamer the mode
6151  // getParser().getStreamer().Emit???();
6152  return false;
6153}
6154
6155/// parseDirectiveCode
6156///  ::= .code 16 | 32
6157bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6158  const AsmToken &Tok = Parser.getTok();
6159  if (Tok.isNot(AsmToken::Integer))
6160    return Error(L, "unexpected token in .code directive");
6161  int64_t Val = Parser.getTok().getIntVal();
6162  if (Val == 16)
6163    Parser.Lex();
6164  else if (Val == 32)
6165    Parser.Lex();
6166  else
6167    return Error(L, "invalid operand to .code directive");
6168
6169  if (getLexer().isNot(AsmToken::EndOfStatement))
6170    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6171  Parser.Lex();
6172
6173  if (Val == 16) {
6174    if (!isThumb())
6175      SwitchMode();
6176    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6177  } else {
6178    if (isThumb())
6179      SwitchMode();
6180    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6181  }
6182
6183  return false;
6184}
6185
6186/// parseDirectiveReq
6187///  ::= name .req registername
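///  e.g. "fp .req r11" lets "fp" be used wherever r11 is expected.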
6188bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6189  Parser.Lex(); // Eat the '.req' token.
6190  unsigned Reg;
6191  SMLoc SRegLoc, ERegLoc;
6192  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6193    Parser.EatToEndOfStatement();
6194    return Error(SRegLoc, "register name expected");
6195  }
6196
6197  // Shouldn't be anything else.
6198  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6199    Parser.EatToEndOfStatement();
6200    return Error(Parser.getTok().getLoc(),
6201                 "unexpected input in .req directive.");
6202  }
6203
6204  Parser.Lex(); // Consume the EndOfStatement
6205
6206  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6207    return Error(SRegLoc, "redefinition of '" + Name +
6208                          "' does not match original.");
6209
6210  return false;
6211}
6212
6213/// parseDirectiveUnreq
6214///  ::= .unreq registername
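///  e.g. ".unreq fp" removes an alias previously created with "fp .req r11".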
6215bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6216  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6217    Parser.EatToEndOfStatement();
6218    return Error(L, "unexpected input in .unreq directive.");
6219  }
6220  RegisterReqs.erase(Parser.getTok().getIdentifier());
6221  Parser.Lex(); // Eat the identifier.
6222  return false;
6223}
6224
6225extern "C" void LLVMInitializeARMAsmLexer();
6226
6227/// Force static initialization.
6228extern "C" void LLVMInitializeARMAsmParser() {
6229  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6230  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6231  LLVMInitializeARMAsmLexer();
6232}
6233
6234#define GET_REGISTER_MATCHER
6235#define GET_MATCHER_IMPLEMENTATION
6236#include "ARMGenAsmMatcher.inc"
6237