ARMAsmParser.cpp revision 0aaf4cd9b34454eb381e1694f520504779c6b7f8
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases created via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // First instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104
105  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
106                          bool &CarrySetting, unsigned &ProcessorIMod,
107                          StringRef &ITMask);
108  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
109                             bool &CanAcceptPredicationCode);
110
111  bool isThumb() const {
112    // FIXME: Can tablegen auto-generate this?
113    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
114  }
115  bool isThumbOne() const {
116    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
117  }
118  bool isThumbTwo() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
120  }
121  bool hasV6Ops() const {
122    return STI.getFeatureBits() & ARM::HasV6Ops;
123  }
124  bool hasV7Ops() const {
125    return STI.getFeatureBits() & ARM::HasV7Ops;
126  }
127  void SwitchMode() {
128    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
129    setAvailableFeatures(FB);
130  }
131  bool isMClass() const {
132    return STI.getFeatureBits() & ARM::FeatureMClass;
133  }
134
135  /// @name Auto-generated Match Functions
136  /// {
137
138#define GET_ASSEMBLER_HEADER
139#include "ARMGenAsmMatcher.inc"
140
141  /// }
142
143  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
144  OperandMatchResultTy parseCoprocNumOperand(
145    SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocRegOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocOptionOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseMemBarrierOptOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseProcIFlagsOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseMSRMaskOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
157                                   StringRef Op, int Low, int High);
158  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
159    return parsePKHImm(O, "lsl", 0, 31);
160  }
161  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "asr", 1, 32);
163  }
164  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
166  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
173
174  // Asm Match Converter Methods
175  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
176                    const SmallVectorImpl<MCParsedAsmOperand*> &);
177  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
180                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
194                             const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
202                  const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
206                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
208                        const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
210                     const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
212                        const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
214                     const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
216                        const SmallVectorImpl<MCParsedAsmOperand*> &);
217
218  bool validateInstruction(MCInst &Inst,
219                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
220  bool processInstruction(MCInst &Inst,
221                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool shouldOmitCCOutOperand(StringRef Mnemonic,
223                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
224
225public:
226  enum ARMMatchResultTy {
227    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
228    Match_RequiresNotITBlock,
229    Match_RequiresV6,
230    Match_RequiresThumb2
231  };
232
233  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
234    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
235    MCAsmParserExtension::Initialize(_Parser);
236
237    // Initialize the set of available features.
238    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
239
240    // Not in an ITBlock to start with.
241    ITState.CurPosition = ~0U;
242  }
243
244  // Implementation of the MCTargetAsmParser interface:
245  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
246  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
247                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
248  bool ParseDirective(AsmToken DirectiveID);
249
250  unsigned checkTargetMatchPredicate(MCInst &Inst);
251
252  bool MatchAndEmitInstruction(SMLoc IDLoc,
253                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
254                               MCStreamer &Out);
255};
256} // end anonymous namespace
257
258namespace {
259
260/// ARMOperand - Instances of this class represent a parsed ARM machine
261/// instruction.
262class ARMOperand : public MCParsedAsmOperand {
263  enum KindTy {
264    k_CondCode,
265    k_CCOut,
266    k_ITCondMask,
267    k_CoprocNum,
268    k_CoprocReg,
269    k_CoprocOption,
270    k_Immediate,
271    k_FPImmediate,
272    k_MemBarrierOpt,
273    k_Memory,
274    k_PostIndexRegister,
275    k_MSRMask,
276    k_ProcIFlags,
277    k_VectorIndex,
278    k_Register,
279    k_RegisterList,
280    k_DPRRegisterList,
281    k_SPRRegisterList,
282    k_VectorList,
283    k_VectorListAllLanes,
284    k_VectorListIndexed,
285    k_ShiftedRegister,
286    k_ShiftedImmediate,
287    k_ShifterImmediate,
288    k_RotateImmediate,
289    k_BitfieldDescriptor,
290    k_Token
291  } Kind;
292
293  SMLoc StartLoc, EndLoc;
294  SmallVector<unsigned, 8> Registers;
295
296  union {
297    struct {
298      ARMCC::CondCodes Val;
299    } CC;
300
301    struct {
302      unsigned Val;
303    } Cop;
304
305    struct {
306      unsigned Val;
307    } CoprocOption;
308
309    struct {
310      unsigned Mask:4;
311    } ITMask;
312
313    struct {
314      ARM_MB::MemBOpt Val;
315    } MBOpt;
316
317    struct {
318      ARM_PROC::IFlags Val;
319    } IFlags;
320
321    struct {
322      unsigned Val;
323    } MMask;
324
325    struct {
326      const char *Data;
327      unsigned Length;
328    } Tok;
329
330    struct {
331      unsigned RegNum;
332    } Reg;
333
334    // A vector register list is a sequential list of 1 to 4 registers.
335    struct {
336      unsigned RegNum;
337      unsigned Count;
338      unsigned LaneIndex;
339      bool isDoubleSpaced;
340    } VectorList;
341
342    struct {
343      unsigned Val;
344    } VectorIndex;
345
346    struct {
347      const MCExpr *Val;
348    } Imm;
349
350    struct {
351      unsigned Val;       // encoded 8-bit representation
352    } FPImm;
353
354    /// Combined record for all forms of ARM address expressions.
355    struct {
356      unsigned BaseRegNum;
357      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
358      // was specified.
359      const MCConstantExpr *OffsetImm;  // Offset immediate value
360      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
361      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
362      unsigned ShiftImm;        // shift for OffsetReg.
363      unsigned Alignment;       // 0 = no alignment specified
364                                // n = alignment in bytes (8, 16, or 32)
365      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
366    } Memory;
367
368    struct {
369      unsigned RegNum;
370      bool isAdd;
371      ARM_AM::ShiftOpc ShiftTy;
372      unsigned ShiftImm;
373    } PostIdxReg;
374
375    struct {
376      bool isASR;
377      unsigned Imm;
378    } ShifterImm;
379    struct {
380      ARM_AM::ShiftOpc ShiftTy;
381      unsigned SrcReg;
382      unsigned ShiftReg;
383      unsigned ShiftImm;
384    } RegShiftedReg;
385    struct {
386      ARM_AM::ShiftOpc ShiftTy;
387      unsigned SrcReg;
388      unsigned ShiftImm;
389    } RegShiftedImm;
390    struct {
391      unsigned Imm;
392    } RotImm;
393    struct {
394      unsigned LSB;
395      unsigned Width;
396    } Bitfield;
397  };
398
399  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
400public:
401  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
402    Kind = o.Kind;
403    StartLoc = o.StartLoc;
404    EndLoc = o.EndLoc;
405    switch (Kind) {
406    case k_CondCode:
407      CC = o.CC;
408      break;
409    case k_ITCondMask:
410      ITMask = o.ITMask;
411      break;
412    case k_Token:
413      Tok = o.Tok;
414      break;
415    case k_CCOut:
416    case k_Register:
417      Reg = o.Reg;
418      break;
419    case k_RegisterList:
420    case k_DPRRegisterList:
421    case k_SPRRegisterList:
422      Registers = o.Registers;
423      break;
424    case k_VectorList:
425    case k_VectorListAllLanes:
426    case k_VectorListIndexed:
427      VectorList = o.VectorList;
428      break;
429    case k_CoprocNum:
430    case k_CoprocReg:
431      Cop = o.Cop;
432      break;
433    case k_CoprocOption:
434      CoprocOption = o.CoprocOption;
435      break;
436    case k_Immediate:
437      Imm = o.Imm;
438      break;
439    case k_FPImmediate:
440      FPImm = o.FPImm;
441      break;
442    case k_MemBarrierOpt:
443      MBOpt = o.MBOpt;
444      break;
445    case k_Memory:
446      Memory = o.Memory;
447      break;
448    case k_PostIndexRegister:
449      PostIdxReg = o.PostIdxReg;
450      break;
451    case k_MSRMask:
452      MMask = o.MMask;
453      break;
454    case k_ProcIFlags:
455      IFlags = o.IFlags;
456      break;
457    case k_ShifterImmediate:
458      ShifterImm = o.ShifterImm;
459      break;
460    case k_ShiftedRegister:
461      RegShiftedReg = o.RegShiftedReg;
462      break;
463    case k_ShiftedImmediate:
464      RegShiftedImm = o.RegShiftedImm;
465      break;
466    case k_RotateImmediate:
467      RotImm = o.RotImm;
468      break;
469    case k_BitfieldDescriptor:
470      Bitfield = o.Bitfield;
471      break;
472    case k_VectorIndex:
473      VectorIndex = o.VectorIndex;
474      break;
475    }
476  }
477
478  /// getStartLoc - Get the location of the first token of this operand.
479  SMLoc getStartLoc() const { return StartLoc; }
480  /// getEndLoc - Get the location of the last token of this operand.
481  SMLoc getEndLoc() const { return EndLoc; }
482
483  ARMCC::CondCodes getCondCode() const {
484    assert(Kind == k_CondCode && "Invalid access!");
485    return CC.Val;
486  }
487
488  unsigned getCoproc() const {
489    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
490    return Cop.Val;
491  }
492
493  StringRef getToken() const {
494    assert(Kind == k_Token && "Invalid access!");
495    return StringRef(Tok.Data, Tok.Length);
496  }
497
498  unsigned getReg() const {
499    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
500    return Reg.RegNum;
501  }
502
503  const SmallVectorImpl<unsigned> &getRegList() const {
504    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
505            Kind == k_SPRRegisterList) && "Invalid access!");
506    return Registers;
507  }
508
509  const MCExpr *getImm() const {
510    assert(Kind == k_Immediate && "Invalid access!");
511    return Imm.Val;
512  }
513
514  unsigned getFPImm() const {
515    assert(Kind == k_FPImmediate && "Invalid access!");
516    return FPImm.Val;
517  }
518
519  unsigned getVectorIndex() const {
520    assert(Kind == k_VectorIndex && "Invalid access!");
521    return VectorIndex.Val;
522  }
523
524  ARM_MB::MemBOpt getMemBarrierOpt() const {
525    assert(Kind == k_MemBarrierOpt && "Invalid access!");
526    return MBOpt.Val;
527  }
528
529  ARM_PROC::IFlags getProcIFlags() const {
530    assert(Kind == k_ProcIFlags && "Invalid access!");
531    return IFlags.Val;
532  }
533
534  unsigned getMSRMask() const {
535    assert(Kind == k_MSRMask && "Invalid access!");
536    return MMask.Val;
537  }
538
539  bool isCoprocNum() const { return Kind == k_CoprocNum; }
540  bool isCoprocReg() const { return Kind == k_CoprocReg; }
541  bool isCoprocOption() const { return Kind == k_CoprocOption; }
542  bool isCondCode() const { return Kind == k_CondCode; }
543  bool isCCOut() const { return Kind == k_CCOut; }
544  bool isITMask() const { return Kind == k_ITCondMask; }
545  bool isITCondCode() const { return Kind == k_CondCode; }
546  bool isImm() const { return Kind == k_Immediate; }
547  bool isFPImm() const { return Kind == k_FPImmediate; }
548  bool isImm8s4() const {
549    if (Kind != k_Immediate)
550      return false;
551    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
552    if (!CE) return false;
553    int64_t Value = CE->getValue();
554    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
555  }
556  bool isImm0_1020s4() const {
557    if (Kind != k_Immediate)
558      return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
563  }
564  bool isImm0_508s4() const {
565    if (Kind != k_Immediate)
566      return false;
567    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
568    if (!CE) return false;
569    int64_t Value = CE->getValue();
570    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
571  }
572  bool isImm0_255() const {
573    if (Kind != k_Immediate)
574      return false;
575    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
576    if (!CE) return false;
577    int64_t Value = CE->getValue();
578    return Value >= 0 && Value < 256;
579  }
580  bool isImm0_1() const {
581    if (Kind != k_Immediate)
582      return false;
583    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
584    if (!CE) return false;
585    int64_t Value = CE->getValue();
586    return Value >= 0 && Value < 2;
587  }
588  bool isImm0_3() const {
589    if (Kind != k_Immediate)
590      return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return Value >= 0 && Value < 4;
595  }
596  bool isImm0_7() const {
597    if (Kind != k_Immediate)
598      return false;
599    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
600    if (!CE) return false;
601    int64_t Value = CE->getValue();
602    return Value >= 0 && Value < 8;
603  }
604  bool isImm0_15() const {
605    if (Kind != k_Immediate)
606      return false;
607    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
608    if (!CE) return false;
609    int64_t Value = CE->getValue();
610    return Value >= 0 && Value < 16;
611  }
612  bool isImm0_31() const {
613    if (Kind != k_Immediate)
614      return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 32;
619  }
620  bool isImm0_63() const {
621    if (Kind != k_Immediate)
622      return false;
623    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
624    if (!CE) return false;
625    int64_t Value = CE->getValue();
626    return Value >= 0 && Value < 64;
627  }
628  bool isImm8() const {
629    if (Kind != k_Immediate)
630      return false;
631    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
632    if (!CE) return false;
633    int64_t Value = CE->getValue();
634    return Value == 8;
635  }
636  bool isImm16() const {
637    if (Kind != k_Immediate)
638      return false;
639    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
640    if (!CE) return false;
641    int64_t Value = CE->getValue();
642    return Value == 16;
643  }
644  bool isImm32() const {
645    if (Kind != k_Immediate)
646      return false;
647    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
648    if (!CE) return false;
649    int64_t Value = CE->getValue();
650    return Value == 32;
651  }
652  bool isShrImm8() const {
653    if (Kind != k_Immediate)
654      return false;
655    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
656    if (!CE) return false;
657    int64_t Value = CE->getValue();
658    return Value > 0 && Value <= 8;
659  }
660  bool isShrImm16() const {
661    if (Kind != k_Immediate)
662      return false;
663    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
664    if (!CE) return false;
665    int64_t Value = CE->getValue();
666    return Value > 0 && Value <= 16;
667  }
668  bool isShrImm32() const {
669    if (Kind != k_Immediate)
670      return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 32;
675  }
676  bool isShrImm64() const {
677    if (Kind != k_Immediate)
678      return false;
679    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
680    if (!CE) return false;
681    int64_t Value = CE->getValue();
682    return Value > 0 && Value <= 64;
683  }
684  bool isImm1_7() const {
685    if (Kind != k_Immediate)
686      return false;
687    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
688    if (!CE) return false;
689    int64_t Value = CE->getValue();
690    return Value > 0 && Value < 8;
691  }
692  bool isImm1_15() const {
693    if (Kind != k_Immediate)
694      return false;
695    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
696    if (!CE) return false;
697    int64_t Value = CE->getValue();
698    return Value > 0 && Value < 16;
699  }
700  bool isImm1_31() const {
701    if (Kind != k_Immediate)
702      return false;
703    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704    if (!CE) return false;
705    int64_t Value = CE->getValue();
706    return Value > 0 && Value < 32;
707  }
708  bool isImm1_16() const {
709    if (Kind != k_Immediate)
710      return false;
711    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
712    if (!CE) return false;
713    int64_t Value = CE->getValue();
714    return Value > 0 && Value < 17;
715  }
716  bool isImm1_32() const {
717    if (Kind != k_Immediate)
718      return false;
719    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
720    if (!CE) return false;
721    int64_t Value = CE->getValue();
722    return Value > 0 && Value < 33;
723  }
724  bool isImm0_32() const {
725    if (Kind != k_Immediate)
726      return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    if (!CE) return false;
729    int64_t Value = CE->getValue();
730    return Value >= 0 && Value < 33;
731  }
732  bool isImm0_65535() const {
733    if (Kind != k_Immediate)
734      return false;
735    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
736    if (!CE) return false;
737    int64_t Value = CE->getValue();
738    return Value >= 0 && Value < 65536;
739  }
740  bool isImm0_65535Expr() const {
741    if (Kind != k_Immediate)
742      return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    // If it's not a constant expression, it'll generate a fixup and be
745    // handled later.
746    if (!CE) return true;
747    int64_t Value = CE->getValue();
748    return Value >= 0 && Value < 65536;
749  }
750  bool isImm24bit() const {
751    if (Kind != k_Immediate)
752      return false;
753    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
754    if (!CE) return false;
755    int64_t Value = CE->getValue();
756    return Value >= 0 && Value <= 0xffffff;
757  }
758  bool isImmThumbSR() const {
759    if (Kind != k_Immediate)
760      return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    if (!CE) return false;
763    int64_t Value = CE->getValue();
764    return Value > 0 && Value < 33;
765  }
766  bool isPKHLSLImm() const {
767    if (Kind != k_Immediate)
768      return false;
769    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
770    if (!CE) return false;
771    int64_t Value = CE->getValue();
772    return Value >= 0 && Value < 32;
773  }
774  bool isPKHASRImm() const {
775    if (Kind != k_Immediate)
776      return false;
777    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778    if (!CE) return false;
779    int64_t Value = CE->getValue();
780    return Value > 0 && Value <= 32;
781  }
782  bool isARMSOImm() const {
783    if (Kind != k_Immediate)
784      return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getSOImmVal(Value) != -1;
789  }
790  bool isARMSOImmNot() const {
791    if (Kind != k_Immediate)
792      return false;
793    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
794    if (!CE) return false;
795    int64_t Value = CE->getValue();
796    return ARM_AM::getSOImmVal(~Value) != -1;
797  }
798  bool isARMSOImmNeg() const {
799    if (Kind != k_Immediate)
800      return false;
801    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
802    if (!CE) return false;
803    int64_t Value = CE->getValue();
804    return ARM_AM::getSOImmVal(-Value) != -1;
805  }
806  bool isT2SOImm() const {
807    if (Kind != k_Immediate)
808      return false;
809    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
810    if (!CE) return false;
811    int64_t Value = CE->getValue();
812    return ARM_AM::getT2SOImmVal(Value) != -1;
813  }
814  bool isT2SOImmNot() const {
815    if (Kind != k_Immediate)
816      return false;
817    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
818    if (!CE) return false;
819    int64_t Value = CE->getValue();
820    return ARM_AM::getT2SOImmVal(~Value) != -1;
821  }
822  bool isT2SOImmNeg() const {
823    if (Kind != k_Immediate)
824      return false;
825    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
826    if (!CE) return false;
827    int64_t Value = CE->getValue();
828    return ARM_AM::getT2SOImmVal(-Value) != -1;
829  }
830  bool isSetEndImm() const {
831    if (Kind != k_Immediate)
832      return false;
833    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
834    if (!CE) return false;
835    int64_t Value = CE->getValue();
836    return Value == 1 || Value == 0;
837  }
838  bool isReg() const { return Kind == k_Register; }
839  bool isRegList() const { return Kind == k_RegisterList; }
840  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
841  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
842  bool isToken() const { return Kind == k_Token; }
843  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
844  bool isMemory() const { return Kind == k_Memory; }
845  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
846  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
847  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
848  bool isRotImm() const { return Kind == k_RotateImmediate; }
849  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
850  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
851  bool isPostIdxReg() const {
852    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
853  }
854  bool isMemNoOffset(bool alignOK = false) const {
855    if (!isMemory())
856      return false;
857    // No offset of any kind.
858    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
859     (alignOK || Memory.Alignment == 0);
860  }
861  bool isAlignedMemory() const {
862    return isMemNoOffset(true);
863  }
864  bool isAddrMode2() const {
865    if (!isMemory() || Memory.Alignment != 0) return false;
866    // Check for register offset.
867    if (Memory.OffsetRegNum) return true;
868    // Immediate offset in range [-4095, 4095].
869    if (!Memory.OffsetImm) return true;
870    int64_t Val = Memory.OffsetImm->getValue();
871    return Val > -4096 && Val < 4096;
872  }
873  bool isAM2OffsetImm() const {
874    if (Kind != k_Immediate)
875      return false;
876    // Immediate offset in range [-4095, 4095].
877    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
878    if (!CE) return false;
879    int64_t Val = CE->getValue();
880    return Val > -4096 && Val < 4096;
881  }
882  bool isAddrMode3() const {
883    if (!isMemory() || Memory.Alignment != 0) return false;
884    // No shifts are legal for AM3.
885    if (Memory.ShiftType != ARM_AM::no_shift) return false;
886    // Check for register offset.
887    if (Memory.OffsetRegNum) return true;
888    // Immediate offset in range [-255, 255].
889    if (!Memory.OffsetImm) return true;
890    int64_t Val = Memory.OffsetImm->getValue();
891    return Val > -256 && Val < 256;
892  }
893  bool isAM3Offset() const {
894    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
895      return false;
896    if (Kind == k_PostIndexRegister)
897      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
898    // Immediate offset in range [-255, 255].
899    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
900    if (!CE) return false;
901    int64_t Val = CE->getValue();
902    // Special case, #-0 is INT32_MIN.
903    return (Val > -256 && Val < 256) || Val == INT32_MIN;
904  }
905  bool isAddrMode5() const {
906    // If we have an immediate that's not a constant, treat it as a label
907    // reference needing a fixup. If it is a constant, it's something else
908    // and we reject it.
909    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
910      return true;
911    if (!isMemory() || Memory.Alignment != 0) return false;
912    // Check for register offset.
913    if (Memory.OffsetRegNum) return false;
914    // Immediate offset in range [-1020, 1020] and a multiple of 4.
915    if (!Memory.OffsetImm) return true;
916    int64_t Val = Memory.OffsetImm->getValue();
917    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
918      Val == INT32_MIN;
919  }
920  bool isMemTBB() const {
921    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
922        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
923      return false;
924    return true;
925  }
926  bool isMemTBH() const {
927    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
928        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
929        Memory.Alignment != 0)
930      return false;
931    return true;
932  }
933  bool isMemRegOffset() const {
934    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
935      return false;
936    return true;
937  }
938  bool isT2MemRegOffset() const {
939    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
940        Memory.Alignment != 0)
941      return false;
942    // Only lsl #{0, 1, 2, 3} allowed.
943    if (Memory.ShiftType == ARM_AM::no_shift)
944      return true;
945    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
946      return false;
947    return true;
948  }
949  bool isMemThumbRR() const {
950    // Thumb reg+reg addressing is simple. Just two registers, a base and
951    // an offset. No shifts, negations or any other complicating factors.
952    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
953        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
954      return false;
955    return isARMLowRegister(Memory.BaseRegNum) &&
956      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
957  }
958  bool isMemThumbRIs4() const {
959    if (!isMemory() || Memory.OffsetRegNum != 0 ||
960        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
961      return false;
962    // Immediate offset, multiple of 4 in range [0, 124].
963    if (!Memory.OffsetImm) return true;
964    int64_t Val = Memory.OffsetImm->getValue();
965    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
966  }
967  bool isMemThumbRIs2() const {
968    if (!isMemory() || Memory.OffsetRegNum != 0 ||
969        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
970      return false;
971    // Immediate offset, multiple of 2 in range [0, 62].
972    if (!Memory.OffsetImm) return true;
973    int64_t Val = Memory.OffsetImm->getValue();
974    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
975  }
976  bool isMemThumbRIs1() const {
977    if (!isMemory() || Memory.OffsetRegNum != 0 ||
978        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
979      return false;
980    // Immediate offset in range [0, 31].
981    if (!Memory.OffsetImm) return true;
982    int64_t Val = Memory.OffsetImm->getValue();
983    return Val >= 0 && Val <= 31;
984  }
985  bool isMemThumbSPI() const {
986    if (!isMemory() || Memory.OffsetRegNum != 0 ||
987        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
988      return false;
989    // Immediate offset, multiple of 4 in range [0, 1020].
990    if (!Memory.OffsetImm) return true;
991    int64_t Val = Memory.OffsetImm->getValue();
992    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
993  }
994  bool isMemImm8s4Offset() const {
995    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
996      return false;
997    // Immediate offset a multiple of 4 in range [-1020, 1020].
998    if (!Memory.OffsetImm) return true;
999    int64_t Val = Memory.OffsetImm->getValue();
1000    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1001  }
1002  bool isMemImm0_1020s4Offset() const {
1003    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1004      return false;
1005    // Immediate offset a multiple of 4 in range [0, 1020].
1006    if (!Memory.OffsetImm) return true;
1007    int64_t Val = Memory.OffsetImm->getValue();
1008    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1009  }
1010  bool isMemImm8Offset() const {
1011    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1012      return false;
1013    // Immediate offset in range [-255, 255].
1014    if (!Memory.OffsetImm) return true;
1015    int64_t Val = Memory.OffsetImm->getValue();
1016    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1017  }
1018  bool isMemPosImm8Offset() const {
1019    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1020      return false;
1021    // Immediate offset in range [0, 255].
1022    if (!Memory.OffsetImm) return true;
1023    int64_t Val = Memory.OffsetImm->getValue();
1024    return Val >= 0 && Val < 256;
1025  }
1026  bool isMemNegImm8Offset() const {
1027    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1028      return false;
1029    // Immediate offset in range [-255, -1].
1030    if (!Memory.OffsetImm) return false;
1031    int64_t Val = Memory.OffsetImm->getValue();
1032    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1033  }
1034  bool isMemUImm12Offset() const {
1035    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1036      return false;
1037    // Immediate offset in range [0, 4095].
1038    if (!Memory.OffsetImm) return true;
1039    int64_t Val = Memory.OffsetImm->getValue();
1040    return (Val >= 0 && Val < 4096);
1041  }
1042  bool isMemImm12Offset() const {
1043    // If we have an immediate that's not a constant, treat it as a label
1044    // reference needing a fixup. If it is a constant, it's something else
1045    // and we reject it.
1046    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1047      return true;
1048
1049    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1050      return false;
1051    // Immediate offset in range [-4095, 4095].
1052    if (!Memory.OffsetImm) return true;
1053    int64_t Val = Memory.OffsetImm->getValue();
1054    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1055  }
1056  bool isPostIdxImm8() const {
1057    if (Kind != k_Immediate)
1058      return false;
1059    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1060    if (!CE) return false;
1061    int64_t Val = CE->getValue();
1062    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1063  }
1064  bool isPostIdxImm8s4() const {
1065    if (Kind != k_Immediate)
1066      return false;
1067    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1068    if (!CE) return false;
1069    int64_t Val = CE->getValue();
1070    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1071      (Val == INT32_MIN);
1072  }
1073
1074  bool isMSRMask() const { return Kind == k_MSRMask; }
1075  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1076
1077  // NEON operands.
1078  bool isSingleSpacedVectorList() const {
1079    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1080  }
1081  bool isDoubleSpacedVectorList() const {
1082    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1083  }
1084  bool isVecListOneD() const {
1085    if (!isSingleSpacedVectorList()) return false;
1086    return VectorList.Count == 1;
1087  }
1088
1089  bool isVecListTwoD() const {
1090    if (!isSingleSpacedVectorList()) return false;
1091    return VectorList.Count == 2;
1092  }
1093
1094  bool isVecListThreeD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 3;
1097  }
1098
1099  bool isVecListFourD() const {
1100    if (!isSingleSpacedVectorList()) return false;
1101    return VectorList.Count == 4;
1102  }
1103
1104  bool isVecListTwoQ() const {
1105    if (!isDoubleSpacedVectorList()) return false;
1106    return VectorList.Count == 2;
1107  }
1108
1109  bool isVecListOneDAllLanes() const {
1110    if (Kind != k_VectorListAllLanes) return false;
1111    return VectorList.Count == 1;
1112  }
1113
1114  bool isVecListTwoDAllLanes() const {
1115    if (Kind != k_VectorListAllLanes) return false;
1116    return VectorList.Count == 2;
1117  }
1118
1119  bool isVecListOneDByteIndexed() const {
1120    if (Kind != k_VectorListIndexed) return false;
1121    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1122  }
1123
1124  bool isVecListOneDHWordIndexed() const {
1125    if (Kind != k_VectorListIndexed) return false;
1126    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1127  }
1128
1129  bool isVecListOneDWordIndexed() const {
1130    if (Kind != k_VectorListIndexed) return false;
1131    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1132  }
1133
1134  bool isVecListTwoDByteIndexed() const {
1135    if (Kind != k_VectorListIndexed) return false;
1136    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1137  }
1138
1139  bool isVecListTwoDHWordIndexed() const {
1140    if (Kind != k_VectorListIndexed) return false;
1141    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1142  }
1143
1144  bool isVecListTwoDWordIndexed() const {
1145    if (Kind != k_VectorListIndexed) return false;
1146    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1147  }
1148
1149  bool isVectorIndex8() const {
1150    if (Kind != k_VectorIndex) return false;
1151    return VectorIndex.Val < 8;
1152  }
1153  bool isVectorIndex16() const {
1154    if (Kind != k_VectorIndex) return false;
1155    return VectorIndex.Val < 4;
1156  }
1157  bool isVectorIndex32() const {
1158    if (Kind != k_VectorIndex) return false;
1159    return VectorIndex.Val < 2;
1160  }
1161
1162  bool isNEONi8splat() const {
1163    if (Kind != k_Immediate)
1164      return false;
1165    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1166    // Must be a constant.
1167    if (!CE) return false;
1168    int64_t Value = CE->getValue();
1169    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1170    // value.
1171    return Value >= 0 && Value < 256;
1172  }
1173
1174  bool isNEONi16splat() const {
1175    if (Kind != k_Immediate)
1176      return false;
1177    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1178    // Must be a constant.
1179    if (!CE) return false;
1180    int64_t Value = CE->getValue();
1181    // i16 value in the range [0,255] or [0x0100, 0xff00]
1182    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1183  }
1184
1185  bool isNEONi32splat() const {
1186    if (Kind != k_Immediate)
1187      return false;
1188    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1189    // Must be a constant.
1190    if (!CE) return false;
1191    int64_t Value = CE->getValue();
1192    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
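    // e.g., 0x2a, 0x2a00, 0x2a0000 and 0x2a000000 all qualify.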
1193    return (Value >= 0 && Value < 256) ||
1194      (Value >= 0x0100 && Value <= 0xff00) ||
1195      (Value >= 0x010000 && Value <= 0xff0000) ||
1196      (Value >= 0x01000000 && Value <= 0xff000000);
1197  }
1198
1199  bool isNEONi32vmov() const {
1200    if (Kind != k_Immediate)
1201      return false;
1202    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1203    // Must be a constant.
1204    if (!CE) return false;
1205    int64_t Value = CE->getValue();
1206    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1207    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1208    return (Value >= 0 && Value < 256) ||
1209      (Value >= 0x0100 && Value <= 0xff00) ||
1210      (Value >= 0x010000 && Value <= 0xff0000) ||
1211      (Value >= 0x01000000 && Value <= 0xff000000) ||
1212      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1213      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1214  }
1215
1216  bool isNEONi64splat() const {
1217    if (Kind != k_Immediate)
1218      return false;
1219    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1220    // Must be a constant.
1221    if (!CE) return false;
1222    uint64_t Value = CE->getValue();
1223    // i64 value with each byte being either 0 or 0xff.
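    // e.g., 0x00ff00ff00ff00ff qualifies, but 0x0012000000000000 does not.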
1224    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1225      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1226    return true;
1227  }
1228
1229  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1230    // Add as immediates when possible.  Null MCExpr = 0.
1231    if (Expr == 0)
1232      Inst.addOperand(MCOperand::CreateImm(0));
1233    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1234      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1235    else
1236      Inst.addOperand(MCOperand::CreateExpr(Expr));
1237  }
1238
1239  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1240    assert(N == 2 && "Invalid number of operands!");
1241    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1242    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1243    Inst.addOperand(MCOperand::CreateReg(RegNum));
1244  }
1245
1246  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1247    assert(N == 1 && "Invalid number of operands!");
1248    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1249  }
1250
1251  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1252    assert(N == 1 && "Invalid number of operands!");
1253    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1254  }
1255
1256  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1257    assert(N == 1 && "Invalid number of operands!");
1258    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1259  }
1260
1261  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1262    assert(N == 1 && "Invalid number of operands!");
1263    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1264  }
1265
1266  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1267    assert(N == 1 && "Invalid number of operands!");
1268    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1269  }
1270
1271  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1272    assert(N == 1 && "Invalid number of operands!");
1273    Inst.addOperand(MCOperand::CreateReg(getReg()));
1274  }
1275
1276  void addRegOperands(MCInst &Inst, unsigned N) const {
1277    assert(N == 1 && "Invalid number of operands!");
1278    Inst.addOperand(MCOperand::CreateReg(getReg()));
1279  }
1280
1281  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1282    assert(N == 3 && "Invalid number of operands!");
1283    assert(isRegShiftedReg() &&
1284           "addRegShiftedRegOperands() on non RegShiftedReg!");
1285    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1286    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1287    Inst.addOperand(MCOperand::CreateImm(
1288      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1289  }
1290
1291  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1292    assert(N == 2 && "Invalid number of operands!");
1293    assert(isRegShiftedImm() &&
1294           "addRegShiftedImmOperands() on non RegShiftedImm!");
1295    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1296    Inst.addOperand(MCOperand::CreateImm(
1297      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1298  }
1299
1300  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1301    assert(N == 1 && "Invalid number of operands!");
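    // The shift is packed as (isASR << 5) | amount, so e.g. "asr #16" becomes
    // (1 << 5) | 16 == 0x30 and "lsl #4" becomes 4.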
1302    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1303                                         ShifterImm.Imm));
1304  }
1305
1306  void addRegListOperands(MCInst &Inst, unsigned N) const {
1307    assert(N == 1 && "Invalid number of operands!");
1308    const SmallVectorImpl<unsigned> &RegList = getRegList();
1309    for (SmallVectorImpl<unsigned>::const_iterator
1310           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1311      Inst.addOperand(MCOperand::CreateReg(*I));
1312  }
1313
1314  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1315    addRegListOperands(Inst, N);
1316  }
1317
1318  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1319    addRegListOperands(Inst, N);
1320  }
1321
1322  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1323    assert(N == 1 && "Invalid number of operands!");
1324    // Encoded as val>>3. The printer handles display as 8, 16, 24.
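    // e.g., "ror #16" is emitted as 16 >> 3 == 2.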
1325    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1326  }
1327
1328  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1329    assert(N == 1 && "Invalid number of operands!");
1330    // Munge the lsb/width into a bitfield mask.
1331    unsigned lsb = Bitfield.LSB;
1332    unsigned width = Bitfield.Width;
1333    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
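    // e.g., lsb = 8 and width = 8 produce Mask = 0xffff00ff (bits [15:8] clear).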
1334    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1335                      (32 - (lsb + width)));
1336    Inst.addOperand(MCOperand::CreateImm(Mask));
1337  }
1338
1339  void addImmOperands(MCInst &Inst, unsigned N) const {
1340    assert(N == 1 && "Invalid number of operands!");
1341    addExpr(Inst, getImm());
1342  }
1343
1344  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1345    assert(N == 1 && "Invalid number of operands!");
1346    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1347  }
1348
1349  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1350    assert(N == 1 && "Invalid number of operands!");
1351    // FIXME: We really want to scale the value here, but the LDRD/STRD
1352    // instructions don't encode operands that way yet.
1353    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1354    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1355  }
1356
1357  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1358    assert(N == 1 && "Invalid number of operands!");
1359    // The immediate is scaled by four in the encoding and is stored
1360    // in the MCInst as such. Lop off the low two bits here.
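    // e.g., #1020 is stored as 255.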
1361    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1362    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1363  }
1364
1365  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1366    assert(N == 1 && "Invalid number of operands!");
1367    // The immediate is scaled by four in the encoding and is stored
1368    // in the MCInst as such. Lop off the low two bits here.
1369    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1370    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1371  }
1372
1373  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1374    assert(N == 1 && "Invalid number of operands!");
1375    // The constant encodes as the immediate-1, and we store in the instruction
1376    // the bits as encoded, so subtract off one here.
1377    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1378    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1379  }
1380
1381  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1382    assert(N == 1 && "Invalid number of operands!");
1383    // The constant encodes as the immediate-1, and we store in the instruction
1384    // the bits as encoded, so subtract off one here.
1385    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1386    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1387  }
1388
1389  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1390    assert(N == 1 && "Invalid number of operands!");
1391    // The constant encodes as the immediate, except for 32, which encodes as
1392    // zero.
1393    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1394    unsigned Imm = CE->getValue();
1395    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1396  }
1397
1398  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1399    assert(N == 1 && "Invalid number of operands!");
1400    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1401    // the instruction as well.
1402    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1403    int Val = CE->getValue();
1404    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1405  }
1406
1407  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1408    assert(N == 1 && "Invalid number of operands!");
1409    // The operand is actually a t2_so_imm, but we have its bitwise
1410    // negation in the assembly source, so twiddle it here.
1411    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1412    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1413  }
1414
1415  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1416    assert(N == 1 && "Invalid number of operands!");
1417    // The operand is actually a t2_so_imm, but we have its
1418    // negation in the assembly source, so twiddle it here.
1419    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1420    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1421  }
1422
1423  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1424    assert(N == 1 && "Invalid number of operands!");
1425    // The operand is actually a so_imm, but we have its bitwise
1426    // negation in the assembly source, so twiddle it here.
1427    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1428    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1429  }
1430
1431  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1432    assert(N == 1 && "Invalid number of operands!");
1433    // The operand is actually a so_imm, but we have its
1434    // negation in the assembly source, so twiddle it here.
1435    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1436    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1437  }
1438
1439  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1440    assert(N == 1 && "Invalid number of operands!");
1441    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1442  }
1443
1444  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1445    assert(N == 1 && "Invalid number of operands!");
1446    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1447  }
1448
1449  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1450    assert(N == 2 && "Invalid number of operands!");
1451    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1452    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1453  }
1454
1455  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1456    assert(N == 3 && "Invalid number of operands!");
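    // e.g., "[r2, #-8]" has no offset register and takes the immediate path
    // below (sub, 8); "[r2, r3, lsl #2]" takes the register-offset path, which
    // folds the shift and direction into the AM2 opcode value.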
1457    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1458    if (!Memory.OffsetRegNum) {
1459      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1460      // Special case for #-0
1461      if (Val == INT32_MIN) Val = 0;
1462      if (Val < 0) Val = -Val;
1463      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1464    } else {
1465      // For register offset, we encode the shift type and negation flag
1466      // here.
1467      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1468                              Memory.ShiftImm, Memory.ShiftType);
1469    }
1470    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1471    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1472    Inst.addOperand(MCOperand::CreateImm(Val));
1473  }
1474
1475  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1476    assert(N == 2 && "Invalid number of operands!");
1477    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1478    assert(CE && "non-constant AM2OffsetImm operand!");
1479    int32_t Val = CE->getValue();
1480    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1481    // Special case for #-0
1482    if (Val == INT32_MIN) Val = 0;
1483    if (Val < 0) Val = -Val;
1484    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1485    Inst.addOperand(MCOperand::CreateReg(0));
1486    Inst.addOperand(MCOperand::CreateImm(Val));
1487  }
1488
1489  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1490    assert(N == 3 && "Invalid number of operands!");
1491    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1492    if (!Memory.OffsetRegNum) {
1493      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1494      // Special case for #-0
1495      if (Val == INT32_MIN) Val = 0;
1496      if (Val < 0) Val = -Val;
1497      Val = ARM_AM::getAM3Opc(AddSub, Val);
1498    } else {
1499      // For register offset, we encode the shift type and negation flag
1500      // here.
1501      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1502    }
1503    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1504    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1505    Inst.addOperand(MCOperand::CreateImm(Val));
1506  }
1507
1508  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1509    assert(N == 2 && "Invalid number of operands!");
1510    if (Kind == k_PostIndexRegister) {
1511      int32_t Val =
1512        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1513      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1514      Inst.addOperand(MCOperand::CreateImm(Val));
1515      return;
1516    }
1517
1518    // Constant offset.
1519    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1520    int32_t Val = CE->getValue();
1521    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1522    // Special case for #-0
1523    if (Val == INT32_MIN) Val = 0;
1524    if (Val < 0) Val = -Val;
1525    Val = ARM_AM::getAM3Opc(AddSub, Val);
1526    Inst.addOperand(MCOperand::CreateReg(0));
1527    Inst.addOperand(MCOperand::CreateImm(Val));
1528  }
1529
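  // Illustrative note: addrmode5 offsets are expressed in words, so an
  // offset of "#-8" below becomes Val = -2, which is then split into
  // AddSub = sub and a magnitude of 2 before ARM_AM::getAM5Opc packs it.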
1530  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1531    assert(N == 2 && "Invalid number of operands!");
1532    // If we have an immediate that's not a constant, treat it as a label
1533    // reference needing a fixup. If it is a constant, it's something else
1534    // and we reject it.
1535    if (isImm()) {
1536      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1537      Inst.addOperand(MCOperand::CreateImm(0));
1538      return;
1539    }
1540
1541    // The lower two bits are always zero and as such are not encoded.
1542    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1543    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1544    // Special case for #-0
1545    if (Val == INT32_MIN) Val = 0;
1546    if (Val < 0) Val = -Val;
1547    Val = ARM_AM::getAM5Opc(AddSub, Val);
1548    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1549    Inst.addOperand(MCOperand::CreateImm(Val));
1550  }
1551
1552  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1553    assert(N == 2 && "Invalid number of operands!");
1554    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1555    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1556    Inst.addOperand(MCOperand::CreateImm(Val));
1557  }
1558
1559  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1560    assert(N == 2 && "Invalid number of operands!");
1561    // The lower two bits are always zero and as such are not encoded.
1562    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1563    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1564    Inst.addOperand(MCOperand::CreateImm(Val));
1565  }
1566
1567  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1568    assert(N == 2 && "Invalid number of operands!");
1569    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1570    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1571    Inst.addOperand(MCOperand::CreateImm(Val));
1572  }
1573
1574  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1575    addMemImm8OffsetOperands(Inst, N);
1576  }
1577
1578  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1579    addMemImm8OffsetOperands(Inst, N);
1580  }
1581
1582  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1583    assert(N == 2 && "Invalid number of operands!");
1584    // If this is an immediate, it's a label reference.
1585    if (Kind == k_Immediate) {
1586      addExpr(Inst, getImm());
1587      Inst.addOperand(MCOperand::CreateImm(0));
1588      return;
1589    }
1590
1591    // Otherwise, it's a normal memory reg+offset.
1592    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1593    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1594    Inst.addOperand(MCOperand::CreateImm(Val));
1595  }
1596
1597  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1598    assert(N == 2 && "Invalid number of operands!");
1599    // If this is an immediate, it's a label reference.
1600    if (Kind == k_Immediate) {
1601      addExpr(Inst, getImm());
1602      Inst.addOperand(MCOperand::CreateImm(0));
1603      return;
1604    }
1605
1606    // Otherwise, it's a normal memory reg+offset.
1607    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1608    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1609    Inst.addOperand(MCOperand::CreateImm(Val));
1610  }
1611
1612  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1613    assert(N == 2 && "Invalid number of operands!");
1614    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1615    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1616  }
1617
1618  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1619    assert(N == 2 && "Invalid number of operands!");
1620    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1621    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1622  }
1623
1624  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1625    assert(N == 3 && "Invalid number of operands!");
1626    unsigned Val =
1627      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1628                        Memory.ShiftImm, Memory.ShiftType);
1629    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1630    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1631    Inst.addOperand(MCOperand::CreateImm(Val));
1632  }
1633
1634  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1635    assert(N == 3 && "Invalid number of operands!");
1636    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1637    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1638    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1639  }
1640
1641  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1642    assert(N == 2 && "Invalid number of operands!");
1643    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1644    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1645  }
1646
1647  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1648    assert(N == 2 && "Invalid number of operands!");
1649    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1650    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1651    Inst.addOperand(MCOperand::CreateImm(Val));
1652  }
1653
1654  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1655    assert(N == 2 && "Invalid number of operands!");
1656    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1657    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1658    Inst.addOperand(MCOperand::CreateImm(Val));
1659  }
1660
1661  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1662    assert(N == 2 && "Invalid number of operands!");
1663    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1664    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1665    Inst.addOperand(MCOperand::CreateImm(Val));
1666  }
1667
1668  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1669    assert(N == 2 && "Invalid number of operands!");
1670    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1671    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1672    Inst.addOperand(MCOperand::CreateImm(Val));
1673  }
1674
1675  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1676    assert(N == 1 && "Invalid number of operands!");
1677    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1678    assert(CE && "non-constant post-idx-imm8 operand!");
1679    int Imm = CE->getValue();
1680    bool isAdd = Imm >= 0;
1681    if (Imm == INT32_MIN) Imm = 0;
1682    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1683    Inst.addOperand(MCOperand::CreateImm(Imm));
1684  }
1685
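  // Illustrative note: like addPostIdxImm8Operands above, but the magnitude
  // is scaled by 4. E.g. "#-16" yields isAdd = false and an encoded value of
  // 16 / 4 = 4, with the add/sub flag placed in bit 8.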
1686  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1687    assert(N == 1 && "Invalid number of operands!");
1688    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1689    assert(CE && "non-constant post-idx-imm8s4 operand!");
1690    int Imm = CE->getValue();
1691    bool isAdd = Imm >= 0;
1692    if (Imm == INT32_MIN) Imm = 0;
1693    // Immediate is scaled by 4.
1694    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1695    Inst.addOperand(MCOperand::CreateImm(Imm));
1696  }
1697
1698  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1699    assert(N == 2 && "Invalid number of operands!");
1700    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1701    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1702  }
1703
1704  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1705    assert(N == 2 && "Invalid number of operands!");
1706    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1707    // The sign, shift type, and shift amount are encoded in a single operand
1708    // using the AM2 encoding helpers.
1709    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1710    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1711                                     PostIdxReg.ShiftTy);
1712    Inst.addOperand(MCOperand::CreateImm(Imm));
1713  }
1714
1715  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1716    assert(N == 1 && "Invalid number of operands!");
1717    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1718  }
1719
1720  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1721    assert(N == 1 && "Invalid number of operands!");
1722    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1723  }
1724
1725  void addVecListOperands(MCInst &Inst, unsigned N) const {
1726    assert(N == 1 && "Invalid number of operands!");
1727    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1728  }
1729
1730  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1731    assert(N == 2 && "Invalid number of operands!");
1732    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1733    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1734  }
1735
1736  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1737    assert(N == 1 && "Invalid number of operands!");
1738    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1739  }
1740
1741  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1742    assert(N == 1 && "Invalid number of operands!");
1743    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1744  }
1745
1746  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1747    assert(N == 1 && "Invalid number of operands!");
1748    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1749  }
1750
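  // Illustrative note for the NEON splat/vmov helpers below: the immediate
  // carries extra high bits that tag the constant's type. An i8 splat of
  // #0x42, for example, becomes 0x42 | 0xe00 = 0xe42.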
1751  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1752    assert(N == 1 && "Invalid number of operands!");
1753    // The immediate encodes the type of constant as well as the value.
1754    // Mask in that this is an i8 splat.
1755    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1756    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1757  }
1758
1759  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1760    assert(N == 1 && "Invalid number of operands!");
1761    // The immediate encodes the type of constant as well as the value.
1762    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1763    unsigned Value = CE->getValue();
1764    if (Value >= 256)
1765      Value = (Value >> 8) | 0xa00;
1766    else
1767      Value |= 0x800;
1768    Inst.addOperand(MCOperand::CreateImm(Value));
1769  }
1770
1771  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1772    assert(N == 1 && "Invalid number of operands!");
1773    // The immediate encodes the type of constant as well as the value.
1774    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1775    unsigned Value = CE->getValue();
1776    if (Value >= 256 && Value <= 0xff00)
1777      Value = (Value >> 8) | 0x200;
1778    else if (Value > 0xffff && Value <= 0xff0000)
1779      Value = (Value >> 16) | 0x400;
1780    else if (Value > 0xffffff)
1781      Value = (Value >> 24) | 0x600;
1782    Inst.addOperand(MCOperand::CreateImm(Value));
1783  }
1784
1785  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1786    assert(N == 1 && "Invalid number of operands!");
1787    // The immediate encodes the type of constant as well as the value.
1788    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1789    unsigned Value = CE->getValue();
1790    if (Value >= 256 && Value <= 0xffff)
1791      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1792    else if (Value > 0xffff && Value <= 0xffffff)
1793      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1794    else if (Value > 0xffffff)
1795      Value = (Value >> 24) | 0x600;
1796    Inst.addOperand(MCOperand::CreateImm(Value));
1797  }
1798
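  // Illustrative note: an i64 "splat" immediate is a per-byte mask, so the
  // loop below keeps one bit per byte. E.g. #0x00ff00ff00ff00ff yields
  // Imm = 0b01010101 = 0x55, giving an operand of 0x55 | 0x1e00 = 0x1e55.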
1799  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1800    assert(N == 1 && "Invalid number of operands!");
1801    // The immediate encodes the type of constant as well as the value.
1802    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1803    uint64_t Value = CE->getValue();
1804    unsigned Imm = 0;
1805    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1806      Imm |= (Value & 1) << i;
1807    }
1808    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1809  }
1810
1811  virtual void print(raw_ostream &OS) const;
1812
1813  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1814    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1815    Op->ITMask.Mask = Mask;
1816    Op->StartLoc = S;
1817    Op->EndLoc = S;
1818    return Op;
1819  }
1820
1821  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1822    ARMOperand *Op = new ARMOperand(k_CondCode);
1823    Op->CC.Val = CC;
1824    Op->StartLoc = S;
1825    Op->EndLoc = S;
1826    return Op;
1827  }
1828
1829  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1830    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1831    Op->Cop.Val = CopVal;
1832    Op->StartLoc = S;
1833    Op->EndLoc = S;
1834    return Op;
1835  }
1836
1837  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1838    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1839    Op->Cop.Val = CopVal;
1840    Op->StartLoc = S;
1841    Op->EndLoc = S;
1842    return Op;
1843  }
1844
1845  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1846    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1847    Op->Cop.Val = Val;
1848    Op->StartLoc = S;
1849    Op->EndLoc = E;
1850    return Op;
1851  }
1852
1853  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1854    ARMOperand *Op = new ARMOperand(k_CCOut);
1855    Op->Reg.RegNum = RegNum;
1856    Op->StartLoc = S;
1857    Op->EndLoc = S;
1858    return Op;
1859  }
1860
1861  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1862    ARMOperand *Op = new ARMOperand(k_Token);
1863    Op->Tok.Data = Str.data();
1864    Op->Tok.Length = Str.size();
1865    Op->StartLoc = S;
1866    Op->EndLoc = S;
1867    return Op;
1868  }
1869
1870  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1871    ARMOperand *Op = new ARMOperand(k_Register);
1872    Op->Reg.RegNum = RegNum;
1873    Op->StartLoc = S;
1874    Op->EndLoc = E;
1875    return Op;
1876  }
1877
1878  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1879                                           unsigned SrcReg,
1880                                           unsigned ShiftReg,
1881                                           unsigned ShiftImm,
1882                                           SMLoc S, SMLoc E) {
1883    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1884    Op->RegShiftedReg.ShiftTy = ShTy;
1885    Op->RegShiftedReg.SrcReg = SrcReg;
1886    Op->RegShiftedReg.ShiftReg = ShiftReg;
1887    Op->RegShiftedReg.ShiftImm = ShiftImm;
1888    Op->StartLoc = S;
1889    Op->EndLoc = E;
1890    return Op;
1891  }
1892
1893  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1894                                            unsigned SrcReg,
1895                                            unsigned ShiftImm,
1896                                            SMLoc S, SMLoc E) {
1897    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1898    Op->RegShiftedImm.ShiftTy = ShTy;
1899    Op->RegShiftedImm.SrcReg = SrcReg;
1900    Op->RegShiftedImm.ShiftImm = ShiftImm;
1901    Op->StartLoc = S;
1902    Op->EndLoc = E;
1903    return Op;
1904  }
1905
1906  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1907                                   SMLoc S, SMLoc E) {
1908    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1909    Op->ShifterImm.isASR = isASR;
1910    Op->ShifterImm.Imm = Imm;
1911    Op->StartLoc = S;
1912    Op->EndLoc = E;
1913    return Op;
1914  }
1915
1916  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1917    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1918    Op->RotImm.Imm = Imm;
1919    Op->StartLoc = S;
1920    Op->EndLoc = E;
1921    return Op;
1922  }
1923
1924  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1925                                    SMLoc S, SMLoc E) {
1926    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1927    Op->Bitfield.LSB = LSB;
1928    Op->Bitfield.Width = Width;
1929    Op->StartLoc = S;
1930    Op->EndLoc = E;
1931    return Op;
1932  }
1933
1934  static ARMOperand *
1935  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1936                SMLoc StartLoc, SMLoc EndLoc) {
1937    KindTy Kind = k_RegisterList;
1938
1939    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1940      Kind = k_DPRRegisterList;
1941    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
1942             contains(Regs.front().first))
1943      Kind = k_SPRRegisterList;
1944
1945    ARMOperand *Op = new ARMOperand(Kind);
1946    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
1947           I = Regs.begin(), E = Regs.end(); I != E; ++I)
1948      Op->Registers.push_back(I->first);
1949    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
1950    Op->StartLoc = StartLoc;
1951    Op->EndLoc = EndLoc;
1952    return Op;
1953  }
1954
1955  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
1956                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
1957    ARMOperand *Op = new ARMOperand(k_VectorList);
1958    Op->VectorList.RegNum = RegNum;
1959    Op->VectorList.Count = Count;
1960    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
1961    Op->StartLoc = S;
1962    Op->EndLoc = E;
1963    return Op;
1964  }
1965
1966  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
1967                                              SMLoc S, SMLoc E) {
1968    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
1969    Op->VectorList.RegNum = RegNum;
1970    Op->VectorList.Count = Count;
1971    Op->StartLoc = S;
1972    Op->EndLoc = E;
1973    return Op;
1974  }
1975
1976  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
1977                                             unsigned Index, SMLoc S, SMLoc E) {
1978    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
1979    Op->VectorList.RegNum = RegNum;
1980    Op->VectorList.Count = Count;
1981    Op->VectorList.LaneIndex = Index;
1982    Op->StartLoc = S;
1983    Op->EndLoc = E;
1984    return Op;
1985  }
1986
1987  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1988                                       MCContext &Ctx) {
1989    ARMOperand *Op = new ARMOperand(k_VectorIndex);
1990    Op->VectorIndex.Val = Idx;
1991    Op->StartLoc = S;
1992    Op->EndLoc = E;
1993    return Op;
1994  }
1995
1996  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
1997    ARMOperand *Op = new ARMOperand(k_Immediate);
1998    Op->Imm.Val = Val;
1999    Op->StartLoc = S;
2000    Op->EndLoc = E;
2001    return Op;
2002  }
2003
2004  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2005    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2006    Op->FPImm.Val = Val;
2007    Op->StartLoc = S;
2008    Op->EndLoc = S;
2009    return Op;
2010  }
2011
2012  static ARMOperand *CreateMem(unsigned BaseRegNum,
2013                               const MCConstantExpr *OffsetImm,
2014                               unsigned OffsetRegNum,
2015                               ARM_AM::ShiftOpc ShiftType,
2016                               unsigned ShiftImm,
2017                               unsigned Alignment,
2018                               bool isNegative,
2019                               SMLoc S, SMLoc E) {
2020    ARMOperand *Op = new ARMOperand(k_Memory);
2021    Op->Memory.BaseRegNum = BaseRegNum;
2022    Op->Memory.OffsetImm = OffsetImm;
2023    Op->Memory.OffsetRegNum = OffsetRegNum;
2024    Op->Memory.ShiftType = ShiftType;
2025    Op->Memory.ShiftImm = ShiftImm;
2026    Op->Memory.Alignment = Alignment;
2027    Op->Memory.isNegative = isNegative;
2028    Op->StartLoc = S;
2029    Op->EndLoc = E;
2030    return Op;
2031  }
2032
2033  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2034                                      ARM_AM::ShiftOpc ShiftTy,
2035                                      unsigned ShiftImm,
2036                                      SMLoc S, SMLoc E) {
2037    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2038    Op->PostIdxReg.RegNum = RegNum;
2039    Op->PostIdxReg.isAdd = isAdd;
2040    Op->PostIdxReg.ShiftTy = ShiftTy;
2041    Op->PostIdxReg.ShiftImm = ShiftImm;
2042    Op->StartLoc = S;
2043    Op->EndLoc = E;
2044    return Op;
2045  }
2046
2047  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2048    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2049    Op->MBOpt.Val = Opt;
2050    Op->StartLoc = S;
2051    Op->EndLoc = S;
2052    return Op;
2053  }
2054
2055  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2056    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2057    Op->IFlags.Val = IFlags;
2058    Op->StartLoc = S;
2059    Op->EndLoc = S;
2060    return Op;
2061  }
2062
2063  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2064    ARMOperand *Op = new ARMOperand(k_MSRMask);
2065    Op->MMask.Val = MMask;
2066    Op->StartLoc = S;
2067    Op->EndLoc = S;
2068    return Op;
2069  }
2070};
2071
2072} // end anonymous namespace.
2073
2074void ARMOperand::print(raw_ostream &OS) const {
2075  switch (Kind) {
2076  case k_FPImmediate:
2077    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2078       << ") >";
2079    break;
2080  case k_CondCode:
2081    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2082    break;
2083  case k_CCOut:
2084    OS << "<ccout " << getReg() << ">";
2085    break;
2086  case k_ITCondMask: {
2087    static const char *MaskStr[] = {
2088      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2089      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2090    };
2091    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2092    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2093    break;
2094  }
2095  case k_CoprocNum:
2096    OS << "<coprocessor number: " << getCoproc() << ">";
2097    break;
2098  case k_CoprocReg:
2099    OS << "<coprocessor register: " << getCoproc() << ">";
2100    break;
2101  case k_CoprocOption:
2102    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2103    break;
2104  case k_MSRMask:
2105    OS << "<mask: " << getMSRMask() << ">";
2106    break;
2107  case k_Immediate:
2108    getImm()->print(OS);
2109    break;
2110  case k_MemBarrierOpt:
2111    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2112    break;
2113  case k_Memory:
2114    OS << "<memory "
2115       << " base:" << Memory.BaseRegNum;
2116    OS << ">";
2117    break;
2118  case k_PostIndexRegister:
2119    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2120       << PostIdxReg.RegNum;
2121    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2122      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2123         << PostIdxReg.ShiftImm;
2124    OS << ">";
2125    break;
2126  case k_ProcIFlags: {
2127    OS << "<ARM_PROC::";
2128    unsigned IFlags = getProcIFlags();
2129    for (int i=2; i >= 0; --i)
2130      if (IFlags & (1 << i))
2131        OS << ARM_PROC::IFlagsToString(1 << i);
2132    OS << ">";
2133    break;
2134  }
2135  case k_Register:
2136    OS << "<register " << getReg() << ">";
2137    break;
2138  case k_ShifterImmediate:
2139    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2140       << " #" << ShifterImm.Imm << ">";
2141    break;
2142  case k_ShiftedRegister:
2143    OS << "<so_reg_reg "
2144       << RegShiftedReg.SrcReg << " "
2145       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2146       << " " << RegShiftedReg.ShiftReg << ">";
2147    break;
2148  case k_ShiftedImmediate:
2149    OS << "<so_reg_imm "
2150       << RegShiftedImm.SrcReg << " "
2151       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2152       << " #" << RegShiftedImm.ShiftImm << ">";
2153    break;
2154  case k_RotateImmediate:
2155    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2156    break;
2157  case k_BitfieldDescriptor:
2158    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2159       << ", width: " << Bitfield.Width << ">";
2160    break;
2161  case k_RegisterList:
2162  case k_DPRRegisterList:
2163  case k_SPRRegisterList: {
2164    OS << "<register_list ";
2165
2166    const SmallVectorImpl<unsigned> &RegList = getRegList();
2167    for (SmallVectorImpl<unsigned>::const_iterator
2168           I = RegList.begin(), E = RegList.end(); I != E; ) {
2169      OS << *I;
2170      if (++I < E) OS << ", ";
2171    }
2172
2173    OS << ">";
2174    break;
2175  }
2176  case k_VectorList:
2177    OS << "<vector_list " << VectorList.Count << " * "
2178       << VectorList.RegNum << ">";
2179    break;
2180  case k_VectorListAllLanes:
2181    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2182       << VectorList.RegNum << ">";
2183    break;
2184  case k_VectorListIndexed:
2185    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2186       << VectorList.Count << " * " << VectorList.RegNum << ">";
2187    break;
2188  case k_Token:
2189    OS << "'" << getToken() << "'";
2190    break;
2191  case k_VectorIndex:
2192    OS << "<vectorindex " << getVectorIndex() << ">";
2193    break;
2194  }
2195}
2196
2197/// @name Auto-generated Match Functions
2198/// {
2199
2200static unsigned MatchRegisterName(StringRef Name);
2201
2202/// }
2203
2204bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2205                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2206  StartLoc = Parser.getTok().getLoc();
2207  RegNo = tryParseRegister();
2208  EndLoc = Parser.getTok().getLoc();
2209
2210  return (RegNo == (unsigned)-1);
2211}
2212
2213/// Try to parse a register name.  The token must be an Identifier when called,
2214/// and if it is a register name the token is eaten and the register number is
2215/// returned.  Otherwise return -1.
2216///
2217int ARMAsmParser::tryParseRegister() {
2218  const AsmToken &Tok = Parser.getTok();
2219  if (Tok.isNot(AsmToken::Identifier)) return -1;
2220
2221  std::string lowerCase = Tok.getString().lower();
2222  unsigned RegNum = MatchRegisterName(lowerCase);
2223  if (!RegNum) {
2224    RegNum = StringSwitch<unsigned>(lowerCase)
2225      .Case("r13", ARM::SP)
2226      .Case("r14", ARM::LR)
2227      .Case("r15", ARM::PC)
2228      .Case("ip", ARM::R12)
2229      // Additional register name aliases for 'gas' compatibility.
2230      .Case("a1", ARM::R0)
2231      .Case("a2", ARM::R1)
2232      .Case("a3", ARM::R2)
2233      .Case("a4", ARM::R3)
2234      .Case("v1", ARM::R4)
2235      .Case("v2", ARM::R5)
2236      .Case("v3", ARM::R6)
2237      .Case("v4", ARM::R7)
2238      .Case("v5", ARM::R8)
2239      .Case("v6", ARM::R9)
2240      .Case("v7", ARM::R10)
2241      .Case("v8", ARM::R11)
2242      .Case("sb", ARM::R9)
2243      .Case("sl", ARM::R10)
2244      .Case("fp", ARM::R11)
2245      .Default(0);
2246  }
2247  if (!RegNum) {
2248    // Check for aliases registered via .req.
2249    StringMap<unsigned>::const_iterator Entry =
2250      RegisterReqs.find(Tok.getIdentifier());
2251    // If no match, return failure.
2252    if (Entry == RegisterReqs.end())
2253      return -1;
2254    Parser.Lex(); // Eat identifier token.
2255    return Entry->getValue();
2256  }
2257
2258  Parser.Lex(); // Eat identifier token.
2259
2260  return RegNum;
2261}
2262
2263// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2264// If a recoverable error occurs, return 1. If an irrecoverable error
2265// occurs, return -1. An irrecoverable error is one where tokens have been
2266// consumed in the process of trying to parse the shifter (i.e., when it is
2267// indeed a shifter operand, but malformed).
2268int ARMAsmParser::tryParseShiftRegister(
2269                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2270  SMLoc S = Parser.getTok().getLoc();
2271  const AsmToken &Tok = Parser.getTok();
2272  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2273
2274  std::string lowerCase = Tok.getString().lower();
2275  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2276      .Case("asl", ARM_AM::lsl)
2277      .Case("lsl", ARM_AM::lsl)
2278      .Case("lsr", ARM_AM::lsr)
2279      .Case("asr", ARM_AM::asr)
2280      .Case("ror", ARM_AM::ror)
2281      .Case("rrx", ARM_AM::rrx)
2282      .Default(ARM_AM::no_shift);
2283
2284  if (ShiftTy == ARM_AM::no_shift)
2285    return 1;
2286
2287  Parser.Lex(); // Eat the operator.
2288
2289  // The source register for the shift has already been added to the
2290  // operand list, so we need to pop it off and combine it into the shifted
2291  // register operand instead.
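  // E.g. for "add r0, r1, r2, lsl #3", "r2" was already pushed as a plain
  // register operand; it is popped here and re-created below as a single
  // shifted-register (or shifted-immediate) operand.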
2292  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2293  if (!PrevOp->isReg())
2294    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2295  int SrcReg = PrevOp->getReg();
2296  int64_t Imm = 0;
2297  int ShiftReg = 0;
2298  if (ShiftTy == ARM_AM::rrx) {
2299    // RRX Doesn't have an explicit shift amount. The encoder expects
2300    // the shift register to be the same as the source register. Seems odd,
2301    // but OK.
2302    ShiftReg = SrcReg;
2303  } else {
2304    // Figure out if this is shifted by a constant or a register (for non-RRX).
2305    if (Parser.getTok().is(AsmToken::Hash) ||
2306        Parser.getTok().is(AsmToken::Dollar)) {
2307      Parser.Lex(); // Eat hash.
2308      SMLoc ImmLoc = Parser.getTok().getLoc();
2309      const MCExpr *ShiftExpr = 0;
2310      if (getParser().ParseExpression(ShiftExpr)) {
2311        Error(ImmLoc, "invalid immediate shift value");
2312        return -1;
2313      }
2314      // The expression must be evaluatable as an immediate.
2315      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2316      if (!CE) {
2317        Error(ImmLoc, "invalid immediate shift value");
2318        return -1;
2319      }
2320      // Range check the immediate.
2321      // lsl, ror: 0 <= imm <= 31
2322      // lsr, asr: 0 <= imm <= 32
2323      Imm = CE->getValue();
2324      if (Imm < 0 ||
2325          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2326          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2327        Error(ImmLoc, "immediate shift value out of range");
2328        return -1;
2329      }
2330    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2331      ShiftReg = tryParseRegister();
2332      SMLoc L = Parser.getTok().getLoc();
2333      if (ShiftReg == -1) {
2334        Error (L, "expected immediate or register in shift operand");
2335        return -1;
2336      }
2337    } else {
2338      Error (Parser.getTok().getLoc(),
2339                    "expected immediate or register in shift operand");
2340      return -1;
2341    }
2342  }
2343
2344  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2345    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2346                                                         ShiftReg, Imm,
2347                                               S, Parser.getTok().getLoc()));
2348  else
2349    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2350                                               S, Parser.getTok().getLoc()));
2351
2352  return 0;
2353}
2354
2355
2356/// Try to parse a register name.  The token must be an Identifier when called.
2357/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2358/// if there is a "writeback". Returns 'true' if it's not a register.
2359///
2360/// TODO this is likely to change to allow different register types and or to
2361/// parse for a specific register type.
2362bool ARMAsmParser::
2363tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2364  SMLoc S = Parser.getTok().getLoc();
2365  int RegNo = tryParseRegister();
2366  if (RegNo == -1)
2367    return true;
2368
2369  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2370
2371  const AsmToken &ExclaimTok = Parser.getTok();
2372  if (ExclaimTok.is(AsmToken::Exclaim)) {
2373    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2374                                               ExclaimTok.getLoc()));
2375    Parser.Lex(); // Eat exclaim token
2376    return false;
2377  }
2378
2379  // Also check for an index operand. This is only legal for vector registers,
2380  // but that'll get caught OK in operand matching, so we don't need to
2381  // explicitly filter everything else out here.
2382  if (Parser.getTok().is(AsmToken::LBrac)) {
2383    SMLoc SIdx = Parser.getTok().getLoc();
2384    Parser.Lex(); // Eat left bracket token.
2385
2386    const MCExpr *ImmVal;
2387    if (getParser().ParseExpression(ImmVal))
2388      return true;
2389    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2390    if (!MCE) {
2391      TokError("immediate value expected for vector index");
2392      return true;
2393    }
2394
2395    SMLoc E = Parser.getTok().getLoc();
2396    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2397      Error(E, "']' expected");
2398      return true;
2399    }
2400
2401    Parser.Lex(); // Eat right bracket token.
2402
2403    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2404                                                     SIdx, E,
2405                                                     getContext()));
2406  }
2407
2408  return false;
2409}
2410
2411/// MatchCoprocessorOperandName - Try to parse a coprocessor related
2412/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2413/// "c5", ...
2414static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2415  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2416  // but efficient.
2417  switch (Name.size()) {
2418  default: break;
2419  case 2:
2420    if (Name[0] != CoprocOp)
2421      return -1;
2422    switch (Name[1]) {
2423    default:  return -1;
2424    case '0': return 0;
2425    case '1': return 1;
2426    case '2': return 2;
2427    case '3': return 3;
2428    case '4': return 4;
2429    case '5': return 5;
2430    case '6': return 6;
2431    case '7': return 7;
2432    case '8': return 8;
2433    case '9': return 9;
2434    }
2435    break;
2436  case 3:
2437    if (Name[0] != CoprocOp || Name[1] != '1')
2438      return -1;
2439    switch (Name[2]) {
2440    default:  return -1;
2441    case '0': return 10;
2442    case '1': return 11;
2443    case '2': return 12;
2444    case '3': return 13;
2445    case '4': return 14;
2446    case '5': return 15;
2447    }
2448    break;
2449  }
2450
2451  return -1;
2452}
2453
2454/// parseITCondCode - Try to parse a condition code for an IT instruction.
2455ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2456parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2457  SMLoc S = Parser.getTok().getLoc();
2458  const AsmToken &Tok = Parser.getTok();
2459  if (!Tok.is(AsmToken::Identifier))
2460    return MatchOperand_NoMatch;
2461  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2462    .Case("eq", ARMCC::EQ)
2463    .Case("ne", ARMCC::NE)
2464    .Case("hs", ARMCC::HS)
2465    .Case("cs", ARMCC::HS)
2466    .Case("lo", ARMCC::LO)
2467    .Case("cc", ARMCC::LO)
2468    .Case("mi", ARMCC::MI)
2469    .Case("pl", ARMCC::PL)
2470    .Case("vs", ARMCC::VS)
2471    .Case("vc", ARMCC::VC)
2472    .Case("hi", ARMCC::HI)
2473    .Case("ls", ARMCC::LS)
2474    .Case("ge", ARMCC::GE)
2475    .Case("lt", ARMCC::LT)
2476    .Case("gt", ARMCC::GT)
2477    .Case("le", ARMCC::LE)
2478    .Case("al", ARMCC::AL)
2479    .Default(~0U);
2480  if (CC == ~0U)
2481    return MatchOperand_NoMatch;
2482  Parser.Lex(); // Eat the token.
2483
2484  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2485
2486  return MatchOperand_Success;
2487}
2488
2489/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2490/// token must be an Identifier when called, and if it is a coprocessor
2491/// number, the token is eaten and the operand is added to the operand list.
2492ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2493parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2494  SMLoc S = Parser.getTok().getLoc();
2495  const AsmToken &Tok = Parser.getTok();
2496  if (Tok.isNot(AsmToken::Identifier))
2497    return MatchOperand_NoMatch;
2498
2499  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2500  if (Num == -1)
2501    return MatchOperand_NoMatch;
2502
2503  Parser.Lex(); // Eat identifier token.
2504  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2505  return MatchOperand_Success;
2506}
2507
2508/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2509/// token must be an Identifier when called, and if it is a coprocessor
2510/// register, the token is eaten and the operand is added to the operand list.
2511ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2512parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2513  SMLoc S = Parser.getTok().getLoc();
2514  const AsmToken &Tok = Parser.getTok();
2515  if (Tok.isNot(AsmToken::Identifier))
2516    return MatchOperand_NoMatch;
2517
2518  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2519  if (Reg == -1)
2520    return MatchOperand_NoMatch;
2521
2522  Parser.Lex(); // Eat identifier token.
2523  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2524  return MatchOperand_Success;
2525}
2526
2527/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2528/// coproc_option : '{' imm0_255 '}'
2529ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2530parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2531  SMLoc S = Parser.getTok().getLoc();
2532
2533  // If this isn't a '{', this isn't a coprocessor immediate operand.
2534  if (Parser.getTok().isNot(AsmToken::LCurly))
2535    return MatchOperand_NoMatch;
2536  Parser.Lex(); // Eat the '{'
2537
2538  const MCExpr *Expr;
2539  SMLoc Loc = Parser.getTok().getLoc();
2540  if (getParser().ParseExpression(Expr)) {
2541    Error(Loc, "illegal expression");
2542    return MatchOperand_ParseFail;
2543  }
2544  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2545  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2546    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2547    return MatchOperand_ParseFail;
2548  }
2549  int Val = CE->getValue();
2550
2551  // Check for and consume the closing '}'
2552  if (Parser.getTok().isNot(AsmToken::RCurly))
2553    return MatchOperand_ParseFail;
2554  SMLoc E = Parser.getTok().getLoc();
2555  Parser.Lex(); // Eat the '}'
2556
2557  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2558  return MatchOperand_Success;
2559}
2560
2561// For register list parsing, we need to map from raw GPR register numbering
2562// to the enumeration values. The enumeration values aren't sorted by
2563// register number due to our using "sp", "lr" and "pc" as canonical names.
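// For example, getNextRegister(ARM::R12) returns ARM::SP even though the
// GPR enum values are not laid out in that numeric order.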
2564static unsigned getNextRegister(unsigned Reg) {
2565  // If this is a GPR, we need to do it manually, otherwise we can rely
2566  // on the sort ordering of the enumeration since the other reg-classes
2567  // are sane.
2568  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2569    return Reg + 1;
2570  switch(Reg) {
2571  default: assert(0 && "Invalid GPR number!");
2572  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2573  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2574  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2575  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2576  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2577  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2578  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2579  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2580  }
2581}
2582
2583// Return the low-subreg of a given Q register.
2584static unsigned getDRegFromQReg(unsigned QReg) {
2585  switch (QReg) {
2586  default: llvm_unreachable("expected a Q register!");
2587  case ARM::Q0:  return ARM::D0;
2588  case ARM::Q1:  return ARM::D2;
2589  case ARM::Q2:  return ARM::D4;
2590  case ARM::Q3:  return ARM::D6;
2591  case ARM::Q4:  return ARM::D8;
2592  case ARM::Q5:  return ARM::D10;
2593  case ARM::Q6:  return ARM::D12;
2594  case ARM::Q7:  return ARM::D14;
2595  case ARM::Q8:  return ARM::D16;
2596  case ARM::Q9:  return ARM::D18;
2597  case ARM::Q10: return ARM::D20;
2598  case ARM::Q11: return ARM::D22;
2599  case ARM::Q12: return ARM::D24;
2600  case ARM::Q13: return ARM::D26;
2601  case ARM::Q14: return ARM::D28;
2602  case ARM::Q15: return ARM::D30;
2603  }
2604}
2605
2606/// Parse a register list.
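/// Accepts forms such as "{r0, r1, r4-r7}" or "{d0-d3}"; a Q register in the
/// list is expanded to its two D sub-registers, and a trailing '^' (used by
/// the LDM/STM system variants) is captured as a separate token operand.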
2607bool ARMAsmParser::
2608parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2609  assert(Parser.getTok().is(AsmToken::LCurly) &&
2610         "Token is not a Left Curly Brace");
2611  SMLoc S = Parser.getTok().getLoc();
2612  Parser.Lex(); // Eat '{' token.
2613  SMLoc RegLoc = Parser.getTok().getLoc();
2614
2615  // Check the first register in the list to see what register class
2616  // this is a list of.
2617  int Reg = tryParseRegister();
2618  if (Reg == -1)
2619    return Error(RegLoc, "register expected");
2620
2621  // The reglist instructions have at most 16 registers, so reserve
2622  // space for that many.
2623  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2624
2625  // Allow Q regs and just interpret them as the two D sub-registers.
2626  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2627    Reg = getDRegFromQReg(Reg);
2628    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2629    ++Reg;
2630  }
2631  const MCRegisterClass *RC;
2632  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2633    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2634  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2635    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2636  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2637    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2638  else
2639    return Error(RegLoc, "invalid register in register list");
2640
2641  // Store the register.
2642  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2643
2644  // This starts immediately after the first register token in the list,
2645  // so we can see either a comma or a minus (range separator) as a legal
2646  // next token.
2647  while (Parser.getTok().is(AsmToken::Comma) ||
2648         Parser.getTok().is(AsmToken::Minus)) {
2649    if (Parser.getTok().is(AsmToken::Minus)) {
2650      Parser.Lex(); // Eat the minus.
2651      SMLoc EndLoc = Parser.getTok().getLoc();
2652      int EndReg = tryParseRegister();
2653      if (EndReg == -1)
2654        return Error(EndLoc, "register expected");
2655      // Allow Q regs and just interpret them as the two D sub-registers.
2656      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2657        EndReg = getDRegFromQReg(EndReg) + 1;
2658      // If the register is the same as the start reg, there's nothing
2659      // more to do.
2660      if (Reg == EndReg)
2661        continue;
2662      // The register must be in the same register class as the first.
2663      if (!RC->contains(EndReg))
2664        return Error(EndLoc, "invalid register in register list");
2665      // Ranges must go from low to high.
2666      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2667        return Error(EndLoc, "bad range in register list");
2668
2669      // Add all the registers in the range to the register list.
2670      while (Reg != EndReg) {
2671        Reg = getNextRegister(Reg);
2672        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2673      }
2674      continue;
2675    }
2676    Parser.Lex(); // Eat the comma.
2677    RegLoc = Parser.getTok().getLoc();
2678    int OldReg = Reg;
2679    const AsmToken RegTok = Parser.getTok();
2680    Reg = tryParseRegister();
2681    if (Reg == -1)
2682      return Error(RegLoc, "register expected");
2683    // Allow Q regs and just interpret them as the two D sub-registers.
2684    bool isQReg = false;
2685    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2686      Reg = getDRegFromQReg(Reg);
2687      isQReg = true;
2688    }
2689    // The register must be in the same register class as the first.
2690    if (!RC->contains(Reg))
2691      return Error(RegLoc, "invalid register in register list");
2692    // List must be monotonically increasing.
2693    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2694      return Error(RegLoc, "register list not in ascending order");
2695    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2696      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2697              ") in register list");
2698      continue;
2699    }
2700    // VFP register lists must also be contiguous.
2701    // It's OK to use the enumeration values directly here, as the
2702    // VFP register classes have the enum sorted properly.
2703    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2704        Reg != OldReg + 1)
2705      return Error(RegLoc, "non-contiguous register range");
2706    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2707    if (isQReg)
2708      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2709  }
2710
2711  SMLoc E = Parser.getTok().getLoc();
2712  if (Parser.getTok().isNot(AsmToken::RCurly))
2713    return Error(E, "'}' expected");
2714  Parser.Lex(); // Eat '}' token.
2715
2716  // Push the register list operand.
2717  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2718
2719  // The ARM system instruction variants for LDM/STM have a '^' token here.
2720  if (Parser.getTok().is(AsmToken::Caret)) {
2721    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2722    Parser.Lex(); // Eat '^' token.
2723  }
2724
2725  return false;
2726}
2727
2728// Helper function to parse the lane index for vector lists.
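// "[]" selects all lanes (e.g. "d0[]"), "[n]" selects a single lane
// (e.g. "d0[2]" gives Index = 2), and no bracket at all means NoLanes.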
2729ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2730parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2731  Index = 0; // Always return a defined index value.
2732  if (Parser.getTok().is(AsmToken::LBrac)) {
2733    Parser.Lex(); // Eat the '['.
2734    if (Parser.getTok().is(AsmToken::RBrac)) {
2735      // "Dn[]" is the 'all lanes' syntax.
2736      LaneKind = AllLanes;
2737      Parser.Lex(); // Eat the ']'.
2738      return MatchOperand_Success;
2739    }
2740    if (Parser.getTok().is(AsmToken::Integer)) {
2741      int64_t Val = Parser.getTok().getIntVal();
2742      // Make this range check context sensitive for .8, .16, .32.
2743      if (Val < 0 || Val > 7)
2744        Error(Parser.getTok().getLoc(), "lane index out of range");
2745      Index = Val;
2746      LaneKind = IndexedLane;
2747      Parser.Lex(); // Eat the token.
2748      if (Parser.getTok().isNot(AsmToken::RBrac))
2749        Error(Parser.getTok().getLoc(), "']' expected");
2750      Parser.Lex(); // Eat the ']'.
2751      return MatchOperand_Success;
2752    }
2753    Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2754    return MatchOperand_ParseFail;
2755  }
2756  LaneKind = NoLanes;
2757  return MatchOperand_Success;
2758}
2759
2760// Parse a vector register list.
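// Accepts lists such as "{d0, d1, d2, d3}", ranges like "{d0-d3}",
// double-spaced lists such as "{d0, d2}", and Q registers (treated as their
// two D sub-registers); an optional lane suffix ("[]" or "[n]") must match
// across all entries.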
2761ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2762parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2763  VectorLaneTy LaneKind;
2764  unsigned LaneIndex;
2765  SMLoc S = Parser.getTok().getLoc();
2766  // As an extension (to match gas), support a plain D register or Q register
2767// (without enclosing curly braces) as a single or double entry list,
2768  // respectively.
2769  if (Parser.getTok().is(AsmToken::Identifier)) {
2770    int Reg = tryParseRegister();
2771    if (Reg == -1)
2772      return MatchOperand_NoMatch;
2773    SMLoc E = Parser.getTok().getLoc();
2774    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2775      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2776      if (Res != MatchOperand_Success)
2777        return Res;
2778      switch (LaneKind) {
2779      default:
2780        assert(0 && "unexpected lane kind!");
2781      case NoLanes:
2782        E = Parser.getTok().getLoc();
2783        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2784        break;
2785      case AllLanes:
2786        E = Parser.getTok().getLoc();
2787        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
2788        break;
2789      case IndexedLane:
2790        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2791                                                               LaneIndex, S,E));
2792        break;
2793      }
2794      return MatchOperand_Success;
2795    }
2796    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2797      Reg = getDRegFromQReg(Reg);
2798      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2799      if (Res != MatchOperand_Success)
2800        return Res;
2801      switch (LaneKind) {
2802      default:
2803        assert(0 && "unexpected lane kind!");
2804      case NoLanes:
2805        E = Parser.getTok().getLoc();
2806        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2807        break;
2808      case AllLanes:
2809        E = Parser.getTok().getLoc();
2810        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
2811        break;
2812      case IndexedLane:
2813        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2814                                                               LaneIndex, S,E));
2815        break;
2816      }
2817      return MatchOperand_Success;
2818    }
2819    Error(S, "vector register expected");
2820    return MatchOperand_ParseFail;
2821  }
2822
2823  if (Parser.getTok().isNot(AsmToken::LCurly))
2824    return MatchOperand_NoMatch;
2825
2826  Parser.Lex(); // Eat '{' token.
2827  SMLoc RegLoc = Parser.getTok().getLoc();
2828
2829  int Reg = tryParseRegister();
2830  if (Reg == -1) {
2831    Error(RegLoc, "register expected");
2832    return MatchOperand_ParseFail;
2833  }
2834  unsigned Count = 1;
2835  unsigned Spacing = 0;
2836  unsigned FirstReg = Reg;
2837  // The list is of D registers, but we also allow Q regs and just interpret
2838  // them as the two D sub-registers.
2839  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2840    FirstReg = Reg = getDRegFromQReg(Reg);
2841    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2842                 // it's ambiguous with four-register single spaced.
2843    ++Reg;
2844    ++Count;
2845  }
2846  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2847    return MatchOperand_ParseFail;
2848
2849  while (Parser.getTok().is(AsmToken::Comma) ||
2850         Parser.getTok().is(AsmToken::Minus)) {
2851    if (Parser.getTok().is(AsmToken::Minus)) {
2852      if (!Spacing)
2853        Spacing = 1; // Register range implies a single spaced list.
2854      else if (Spacing == 2) {
2855        Error(Parser.getTok().getLoc(),
2856              "sequential registers in double spaced list");
2857        return MatchOperand_ParseFail;
2858      }
2859      Parser.Lex(); // Eat the minus.
2860      SMLoc EndLoc = Parser.getTok().getLoc();
2861      int EndReg = tryParseRegister();
2862      if (EndReg == -1) {
2863        Error(EndLoc, "register expected");
2864        return MatchOperand_ParseFail;
2865      }
2866      // Allow Q regs and just interpret them as the two D sub-registers.
2867      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2868        EndReg = getDRegFromQReg(EndReg) + 1;
2869      // If the register is the same as the start reg, there's nothing
2870      // more to do.
2871      if (Reg == EndReg)
2872        continue;
2873      // The register must be in the same register class as the first.
2874      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2875        Error(EndLoc, "invalid register in register list");
2876        return MatchOperand_ParseFail;
2877      }
2878      // Ranges must go from low to high.
2879      if (Reg > EndReg) {
2880        Error(EndLoc, "bad range in register list");
2881        return MatchOperand_ParseFail;
2882      }
2883      // Parse the lane specifier if present.
2884      VectorLaneTy NextLaneKind;
2885      unsigned NextLaneIndex;
2886      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2887        return MatchOperand_ParseFail;
2888      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2889        Error(EndLoc, "mismatched lane index in register list");
2890        return MatchOperand_ParseFail;
2891      }
2892      EndLoc = Parser.getTok().getLoc();
2893
2894      // Add all the registers in the range to the register list.
2895      Count += EndReg - Reg;
2896      Reg = EndReg;
2897      continue;
2898    }
2899    Parser.Lex(); // Eat the comma.
2900    RegLoc = Parser.getTok().getLoc();
2901    int OldReg = Reg;
2902    Reg = tryParseRegister();
2903    if (Reg == -1) {
2904      Error(RegLoc, "register expected");
2905      return MatchOperand_ParseFail;
2906    }
2907    // Vector register lists must be contiguous.
2908    // It's OK to use the enumeration values directly here, as the
2909    // VFP register classes have the enum sorted properly.
2910    //
2911    // The list is of D registers, but we also allow Q regs and just interpret
2912    // them as the two D sub-registers.
2913    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2914      if (!Spacing)
2915        Spacing = 1; // Register range implies a single spaced list.
2916      else if (Spacing == 2) {
2917        Error(RegLoc,
2918              "invalid register in double-spaced list (must be 'D' register')");
2919        return MatchOperand_ParseFail;
2920      }
2921      Reg = getDRegFromQReg(Reg);
2922      if (Reg != OldReg + 1) {
2923        Error(RegLoc, "non-contiguous register range");
2924        return MatchOperand_ParseFail;
2925      }
2926      ++Reg;
2927      Count += 2;
2928      // Parse the lane specifier if present.
2929      VectorLaneTy NextLaneKind;
2930      unsigned NextLaneIndex;
2931      SMLoc EndLoc = Parser.getTok().getLoc();
2932      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2933        return MatchOperand_ParseFail;
2934      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2935        Error(EndLoc, "mismatched lane index in register list");
2936        return MatchOperand_ParseFail;
2937      }
2938      continue;
2939    }
2940    // Normal D register.
2941    // Figure out the register spacing (single or double) of the list if
2942    // we don't know it already.
2943    if (!Spacing)
2944      Spacing = 1 + (Reg == OldReg + 2);
2945
2946    // Just check that it's contiguous and keep going.
2947    if (Reg != OldReg + Spacing) {
2948      Error(RegLoc, "non-contiguous register range");
2949      return MatchOperand_ParseFail;
2950    }
2951    ++Count;
2952    // Parse the lane specifier if present.
2953    VectorLaneTy NextLaneKind;
2954    unsigned NextLaneIndex;
2955    SMLoc EndLoc = Parser.getTok().getLoc();
2956    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2957      return MatchOperand_ParseFail;
2958    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2959      Error(EndLoc, "mismatched lane index in register list");
2960      return MatchOperand_ParseFail;
2961    }
2962    if (Spacing == 2 && LaneKind != NoLanes) {
2963      Error(EndLoc,
2964            "lane index specfier invalid in double spaced register list");
2965      return MatchOperand_ParseFail;
2966    }
2967  }
2968
2969  SMLoc E = Parser.getTok().getLoc();
2970  if (Parser.getTok().isNot(AsmToken::RCurly)) {
2971    Error(E, "'}' expected");
2972    return MatchOperand_ParseFail;
2973  }
2974  Parser.Lex(); // Eat '}' token.
2975
2976  switch (LaneKind) {
2977  default:
2978    assert(0 && "unexpected lane kind in register list.");
2979  case NoLanes:
2980    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
2981                                                    (Spacing == 2), S, E));
2982    break;
2983  case AllLanes:
2984    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
2985                                                            S, E));
2986    break;
2987  case IndexedLane:
2988    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
2989                                                           LaneIndex, S, E));
2990    break;
2991  }
2992  return MatchOperand_Success;
2993}
2994
2995/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
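/// For example, the 'ish' in "dmb ish" or the 'sy' in "dsb sy".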
2996ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2997parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2998  SMLoc S = Parser.getTok().getLoc();
2999  const AsmToken &Tok = Parser.getTok();
3000  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3001  StringRef OptStr = Tok.getString();
3002
3003  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3004    .Case("sy",    ARM_MB::SY)
3005    .Case("st",    ARM_MB::ST)
3006    .Case("sh",    ARM_MB::ISH)
3007    .Case("ish",   ARM_MB::ISH)
3008    .Case("shst",  ARM_MB::ISHST)
3009    .Case("ishst", ARM_MB::ISHST)
3010    .Case("nsh",   ARM_MB::NSH)
3011    .Case("un",    ARM_MB::NSH)
3012    .Case("nshst", ARM_MB::NSHST)
3013    .Case("unst",  ARM_MB::NSHST)
3014    .Case("osh",   ARM_MB::OSH)
3015    .Case("oshst", ARM_MB::OSHST)
3016    .Default(~0U);
3017
3018  if (Opt == ~0U)
3019    return MatchOperand_NoMatch;
3020
3021  Parser.Lex(); // Eat identifier token.
3022  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3023  return MatchOperand_Success;
3024}
3025
3026/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
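/// For example, the 'if' in "cpsid if" (any subset of the 'a', 'i' and 'f'
/// letters, or "none").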
3027ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3028parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3029  SMLoc S = Parser.getTok().getLoc();
3030  const AsmToken &Tok = Parser.getTok();
3031  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3032  StringRef IFlagsStr = Tok.getString();
3033
3034  // An iflags string of "none" is interpreted to mean that none of the AIF
3035  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3036  unsigned IFlags = 0;
3037  if (IFlagsStr != "none") {
3038    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3039      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3040        .Case("a", ARM_PROC::A)
3041        .Case("i", ARM_PROC::I)
3042        .Case("f", ARM_PROC::F)
3043        .Default(~0U);
3044
3045      // If some specific iflag is already set, it means that some letter is
3046      // present more than once, which is not acceptable.
3047      if (Flag == ~0U || (IFlags & Flag))
3048        return MatchOperand_NoMatch;
3049
3050      IFlags |= Flag;
3051    }
3052  }
3053
3054  Parser.Lex(); // Eat identifier token.
3055  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3056  return MatchOperand_Success;
3057}
3058
3059/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
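/// For example, the 'cpsr_fc' in "msr cpsr_fc, r0", or the 'primask' in
/// "msr primask, r0" when targeting an M-class core.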
3060ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3061parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3062  SMLoc S = Parser.getTok().getLoc();
3063  const AsmToken &Tok = Parser.getTok();
3064  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3065  StringRef Mask = Tok.getString();
3066
3067  if (isMClass()) {
3068    // See ARMv6-M 10.1.1
3069    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3070      .Case("apsr", 0)
3071      .Case("iapsr", 1)
3072      .Case("eapsr", 2)
3073      .Case("xpsr", 3)
3074      .Case("ipsr", 5)
3075      .Case("epsr", 6)
3076      .Case("iepsr", 7)
3077      .Case("msp", 8)
3078      .Case("psp", 9)
3079      .Case("primask", 16)
3080      .Case("basepri", 17)
3081      .Case("basepri_max", 18)
3082      .Case("faultmask", 19)
3083      .Case("control", 20)
3084      .Default(~0U);
3085
3086    if (FlagsVal == ~0U)
3087      return MatchOperand_NoMatch;
3088
3089    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3090      // basepri, basepri_max and faultmask only valid for V7m.
3091      return MatchOperand_NoMatch;
3092
3093    Parser.Lex(); // Eat identifier token.
3094    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3095    return MatchOperand_Success;
3096  }
3097
3098  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3099  size_t Start = 0, Next = Mask.find('_');
3100  StringRef Flags = "";
3101  std::string SpecReg = Mask.slice(Start, Next).lower();
3102  if (Next != StringRef::npos)
3103    Flags = Mask.slice(Next+1, Mask.size());
3104
3105  // FlagsVal contains the complete mask:
3106  // 3-0: Mask
3107  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
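  // For example, 'cpsr_fc' yields 0x9 (f|c) and 'spsr_fc' yields 0x19.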
3108  unsigned FlagsVal = 0;
3109
3110  if (SpecReg == "apsr") {
3111    FlagsVal = StringSwitch<unsigned>(Flags)
3112    .Case("nzcvq",  0x8) // same as CPSR_f
3113    .Case("g",      0x4) // same as CPSR_s
3114    .Case("nzcvqg", 0xc) // same as CPSR_fs
3115    .Default(~0U);
3116
3117    if (FlagsVal == ~0U) {
3118      if (!Flags.empty())
3119        return MatchOperand_NoMatch;
3120      else
3121        FlagsVal = 8; // No flag
3122    }
3123  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3124    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3125      Flags = "fc";
3126    for (int i = 0, e = Flags.size(); i != e; ++i) {
3127      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3128      .Case("c", 1)
3129      .Case("x", 2)
3130      .Case("s", 4)
3131      .Case("f", 8)
3132      .Default(~0U);
3133
3134      // If the flag is invalid, or some specific flag is already set, it means
3135      // that some letter is invalid or present more than once; this is not
3136      // acceptable.
3136      if (Flag == ~0U || (FlagsVal & Flag))
3137        return MatchOperand_NoMatch;
3138      FlagsVal |= Flag;
3139    }
3140  } else // No match for special register.
3141    return MatchOperand_NoMatch;
3142
3143  // Special register without flags is NOT equivalent to "fc" flags.
3144  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3145  // two lines would enable gas compatibility at the expense of breaking
3146  // round-tripping.
3147  //
3148  // if (!FlagsVal)
3149  //  FlagsVal = 0x9;
3150
3151  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3152  if (SpecReg == "spsr")
3153    FlagsVal |= 16;
3154
3155  Parser.Lex(); // Eat identifier token.
3156  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3157  return MatchOperand_Success;
3158}
3159
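/// parsePKHImm - Parse the shift amount operand for the PKHBT/PKHTB
/// instructions; for example, the 'lsl #8' in "pkhbt r0, r1, r2, lsl #8".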
3160ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3161parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3162            int Low, int High) {
3163  const AsmToken &Tok = Parser.getTok();
3164  if (Tok.isNot(AsmToken::Identifier)) {
3165    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3166    return MatchOperand_ParseFail;
3167  }
3168  StringRef ShiftName = Tok.getString();
3169  std::string LowerOp = Op.lower();
3170  std::string UpperOp = Op.upper();
3171  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3172    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3173    return MatchOperand_ParseFail;
3174  }
3175  Parser.Lex(); // Eat shift type token.
3176
3177  // There must be a '#' and a shift amount.
3178  if (Parser.getTok().isNot(AsmToken::Hash) &&
3179      Parser.getTok().isNot(AsmToken::Dollar)) {
3180    Error(Parser.getTok().getLoc(), "'#' expected");
3181    return MatchOperand_ParseFail;
3182  }
3183  Parser.Lex(); // Eat hash token.
3184
3185  const MCExpr *ShiftAmount;
3186  SMLoc Loc = Parser.getTok().getLoc();
3187  if (getParser().ParseExpression(ShiftAmount)) {
3188    Error(Loc, "illegal expression");
3189    return MatchOperand_ParseFail;
3190  }
3191  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3192  if (!CE) {
3193    Error(Loc, "constant expression expected");
3194    return MatchOperand_ParseFail;
3195  }
3196  int Val = CE->getValue();
3197  if (Val < Low || Val > High) {
3198    Error(Loc, "immediate value out of range");
3199    return MatchOperand_ParseFail;
3200  }
3201
3202  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3203
3204  return MatchOperand_Success;
3205}
3206
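/// parseSetEndImm - Parse the endianness operand of the SETEND instruction;
/// for example, the 'be' in "setend be".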
3207ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3208parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3209  const AsmToken &Tok = Parser.getTok();
3210  SMLoc S = Tok.getLoc();
3211  if (Tok.isNot(AsmToken::Identifier)) {
3212    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3213    return MatchOperand_ParseFail;
3214  }
3215  int Val = StringSwitch<int>(Tok.getString())
3216    .Case("be", 1)
3217    .Case("le", 0)
3218    .Default(-1);
3219  Parser.Lex(); // Eat the token.
3220
3221  if (Val == -1) {
3222    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3223    return MatchOperand_ParseFail;
3224  }
3225  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3226                                                                  getContext()),
3227                                           S, Parser.getTok().getLoc()));
3228  return MatchOperand_Success;
3229}
3230
3231/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3232/// instructions. Legal values are:
3233///     lsl #n  'n' in [0,31]
3234///     asr #n  'n' in [1,32]
3235///             n == 32 encoded as n == 0.
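/// For example, the 'lsl #8' in "ssat r0, #8, r1, lsl #8".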
3236ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3237parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3238  const AsmToken &Tok = Parser.getTok();
3239  SMLoc S = Tok.getLoc();
3240  if (Tok.isNot(AsmToken::Identifier)) {
3241    Error(S, "shift operator 'asr' or 'lsl' expected");
3242    return MatchOperand_ParseFail;
3243  }
3244  StringRef ShiftName = Tok.getString();
3245  bool isASR;
3246  if (ShiftName == "lsl" || ShiftName == "LSL")
3247    isASR = false;
3248  else if (ShiftName == "asr" || ShiftName == "ASR")
3249    isASR = true;
3250  else {
3251    Error(S, "shift operator 'asr' or 'lsl' expected");
3252    return MatchOperand_ParseFail;
3253  }
3254  Parser.Lex(); // Eat the operator.
3255
3256  // A '#' and a shift amount.
3257  if (Parser.getTok().isNot(AsmToken::Hash) &&
3258      Parser.getTok().isNot(AsmToken::Dollar)) {
3259    Error(Parser.getTok().getLoc(), "'#' expected");
3260    return MatchOperand_ParseFail;
3261  }
3262  Parser.Lex(); // Eat hash token.
3263
3264  const MCExpr *ShiftAmount;
3265  SMLoc E = Parser.getTok().getLoc();
3266  if (getParser().ParseExpression(ShiftAmount)) {
3267    Error(E, "malformed shift expression");
3268    return MatchOperand_ParseFail;
3269  }
3270  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3271  if (!CE) {
3272    Error(E, "shift amount must be an immediate");
3273    return MatchOperand_ParseFail;
3274  }
3275
3276  int64_t Val = CE->getValue();
3277  if (isASR) {
3278    // Shift amount must be in [1,32]
3279    if (Val < 1 || Val > 32) {
3280      Error(E, "'asr' shift amount must be in range [1,32]");
3281      return MatchOperand_ParseFail;
3282    }
3283    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3284    if (isThumb() && Val == 32) {
3285      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3286      return MatchOperand_ParseFail;
3287    }
3288    if (Val == 32) Val = 0;
3289  } else {
3290    // Shift amount must be in [0,31]
3291    if (Val < 0 || Val > 31) {
3292      Error(E, "'lsl' shift amount must be in range [0,31]");
3293      return MatchOperand_ParseFail;
3294    }
3295  }
3296
3297  E = Parser.getTok().getLoc();
3298  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3299
3300  return MatchOperand_Success;
3301}
3302
3303/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3304/// of instructions. Legal values are:
3305///     ror #n  'n' in {0, 8, 16, 24}
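/// For example, the 'ror #8' in "sxtb r0, r1, ror #8".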
3306ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3307parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3308  const AsmToken &Tok = Parser.getTok();
3309  SMLoc S = Tok.getLoc();
3310  if (Tok.isNot(AsmToken::Identifier))
3311    return MatchOperand_NoMatch;
3312  StringRef ShiftName = Tok.getString();
3313  if (ShiftName != "ror" && ShiftName != "ROR")
3314    return MatchOperand_NoMatch;
3315  Parser.Lex(); // Eat the operator.
3316
3317  // A '#' and a rotate amount.
3318  if (Parser.getTok().isNot(AsmToken::Hash) &&
3319      Parser.getTok().isNot(AsmToken::Dollar)) {
3320    Error(Parser.getTok().getLoc(), "'#' expected");
3321    return MatchOperand_ParseFail;
3322  }
3323  Parser.Lex(); // Eat hash token.
3324
3325  const MCExpr *ShiftAmount;
3326  SMLoc E = Parser.getTok().getLoc();
3327  if (getParser().ParseExpression(ShiftAmount)) {
3328    Error(E, "malformed rotate expression");
3329    return MatchOperand_ParseFail;
3330  }
3331  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3332  if (!CE) {
3333    Error(E, "rotate amount must be an immediate");
3334    return MatchOperand_ParseFail;
3335  }
3336
3337  int64_t Val = CE->getValue();
3338  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3339  // normally, zero is represented in asm by omitting the rotate operand
3340  // entirely.
3341  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3342    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3343    return MatchOperand_ParseFail;
3344  }
3345
3346  E = Parser.getTok().getLoc();
3347  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3348
3349  return MatchOperand_Success;
3350}
3351
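/// parseBitfield - Parse the lsb/width operand pair used by bitfield
/// instructions such as BFI, SBFX and UBFX; for example, the '#8, #4' in
/// "bfi r0, r1, #8, #4".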
3352ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3353parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3354  SMLoc S = Parser.getTok().getLoc();
3355  // The bitfield descriptor is really two operands, the LSB and the width.
3356  if (Parser.getTok().isNot(AsmToken::Hash) &&
3357      Parser.getTok().isNot(AsmToken::Dollar)) {
3358    Error(Parser.getTok().getLoc(), "'#' expected");
3359    return MatchOperand_ParseFail;
3360  }
3361  Parser.Lex(); // Eat hash token.
3362
3363  const MCExpr *LSBExpr;
3364  SMLoc E = Parser.getTok().getLoc();
3365  if (getParser().ParseExpression(LSBExpr)) {
3366    Error(E, "malformed immediate expression");
3367    return MatchOperand_ParseFail;
3368  }
3369  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3370  if (!CE) {
3371    Error(E, "'lsb' operand must be an immediate");
3372    return MatchOperand_ParseFail;
3373  }
3374
3375  int64_t LSB = CE->getValue();
3376  // The LSB must be in the range [0,31]
3377  if (LSB < 0 || LSB > 31) {
3378    Error(E, "'lsb' operand must be in the range [0,31]");
3379    return MatchOperand_ParseFail;
3380  }
3381  E = Parser.getTok().getLoc();
3382
3383  // Expect another immediate operand.
3384  if (Parser.getTok().isNot(AsmToken::Comma)) {
3385    Error(Parser.getTok().getLoc(), "too few operands");
3386    return MatchOperand_ParseFail;
3387  }
3388  Parser.Lex(); // Eat the comma.
3389  if (Parser.getTok().isNot(AsmToken::Hash) &&
3390      Parser.getTok().isNot(AsmToken::Dollar)) {
3391    Error(Parser.getTok().getLoc(), "'#' expected");
3392    return MatchOperand_ParseFail;
3393  }
3394  Parser.Lex(); // Eat hash token.
3395
3396  const MCExpr *WidthExpr;
3397  if (getParser().ParseExpression(WidthExpr)) {
3398    Error(E, "malformed immediate expression");
3399    return MatchOperand_ParseFail;
3400  }
3401  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3402  if (!CE) {
3403    Error(E, "'width' operand must be an immediate");
3404    return MatchOperand_ParseFail;
3405  }
3406
3407  int64_t Width = CE->getValue();
3408  // The width must be in the range [1,32-lsb]
3409  if (Width < 1 || Width > 32 - LSB) {
3410    Error(E, "'width' operand must be in the range [1,32-lsb]");
3411    return MatchOperand_ParseFail;
3412  }
3413  E = Parser.getTok().getLoc();
3414
3415  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3416
3417  return MatchOperand_Success;
3418}
3419
3420ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3421parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3422  // Check for a post-index addressing register operand. Specifically:
3423  // postidx_reg := '+' register {, shift}
3424  //              | '-' register {, shift}
3425  //              | register {, shift}
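  // For example, the 'r2, lsl #2' in "ldr r0, [r1], r2, lsl #2".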
3426
3427  // This method must return MatchOperand_NoMatch without consuming any tokens
3428  // in the case where there is no match, as other alternatives take other
3429  // parse methods.
3430  AsmToken Tok = Parser.getTok();
3431  SMLoc S = Tok.getLoc();
3432  bool haveEaten = false;
3433  bool isAdd = true;
3434  int Reg = -1;
3435  if (Tok.is(AsmToken::Plus)) {
3436    Parser.Lex(); // Eat the '+' token.
3437    haveEaten = true;
3438  } else if (Tok.is(AsmToken::Minus)) {
3439    Parser.Lex(); // Eat the '-' token.
3440    isAdd = false;
3441    haveEaten = true;
3442  }
3443  if (Parser.getTok().is(AsmToken::Identifier))
3444    Reg = tryParseRegister();
3445  if (Reg == -1) {
3446    if (!haveEaten)
3447      return MatchOperand_NoMatch;
3448    Error(Parser.getTok().getLoc(), "register expected");
3449    return MatchOperand_ParseFail;
3450  }
3451  SMLoc E = Parser.getTok().getLoc();
3452
3453  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3454  unsigned ShiftImm = 0;
3455  if (Parser.getTok().is(AsmToken::Comma)) {
3456    Parser.Lex(); // Eat the ','.
3457    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3458      return MatchOperand_ParseFail;
3459  }
3460
3461  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3462                                                  ShiftImm, S, E));
3463
3464  return MatchOperand_Success;
3465}
3466
3467ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3468parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3469  // Check for a post-index addressing operand. Specifically:
3470  // am3offset := '+' register
3471  //              | '-' register
3472  //              | register
3473  //              | # imm
3474  //              | # + imm
3475  //              | # - imm
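  // For example, the '#8' in "ldrd r0, r1, [r2], #8" or the 'r3' in
  // "ldrh r0, [r1], r3".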
3476
3477  // This method must return MatchOperand_NoMatch without consuming any tokens
3478  // in the case where there is no match, as other alternatives take other
3479  // parse methods.
3480  AsmToken Tok = Parser.getTok();
3481  SMLoc S = Tok.getLoc();
3482
3483  // Do immediates first, as we always parse those if we have a '#'.
3484  if (Parser.getTok().is(AsmToken::Hash) ||
3485      Parser.getTok().is(AsmToken::Dollar)) {
3486    Parser.Lex(); // Eat the '#'.
3487    // Explicitly look for a '-', as we need to encode negative zero
3488    // differently.
3489    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3490    const MCExpr *Offset;
3491    if (getParser().ParseExpression(Offset))
3492      return MatchOperand_ParseFail;
3493    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3494    if (!CE) {
3495      Error(S, "constant expression expected");
3496      return MatchOperand_ParseFail;
3497    }
3498    SMLoc E = Tok.getLoc();
3499    // Negative zero is encoded as the flag value INT32_MIN.
3500    int32_t Val = CE->getValue();
3501    if (isNegative && Val == 0)
3502      Val = INT32_MIN;
3503
3504    Operands.push_back(
3505      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3506
3507    return MatchOperand_Success;
3508  }
3509
3510
3511  bool haveEaten = false;
3512  bool isAdd = true;
3513  int Reg = -1;
3514  if (Tok.is(AsmToken::Plus)) {
3515    Parser.Lex(); // Eat the '+' token.
3516    haveEaten = true;
3517  } else if (Tok.is(AsmToken::Minus)) {
3518    Parser.Lex(); // Eat the '-' token.
3519    isAdd = false;
3520    haveEaten = true;
3521  }
3522  if (Parser.getTok().is(AsmToken::Identifier))
3523    Reg = tryParseRegister();
3524  if (Reg == -1) {
3525    if (!haveEaten)
3526      return MatchOperand_NoMatch;
3527    Error(Parser.getTok().getLoc(), "register expected");
3528    return MatchOperand_ParseFail;
3529  }
3530  SMLoc E = Parser.getTok().getLoc();
3531
3532  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3533                                                  0, S, E));
3534
3535  return MatchOperand_Success;
3536}
3537
3538/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3539/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3540/// when they refer to multiple MIOperands inside a single one.
3541bool ARMAsmParser::
3542cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3543             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3544  // Rt, Rt2
3545  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3546  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3547  // Create a writeback register dummy placeholder.
3548  Inst.addOperand(MCOperand::CreateReg(0));
3549  // addr
3550  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3551  // pred
3552  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3553  return true;
3554}
3555
3556/// cvtT2StrdPre - Convert parsed operands to MCInst.
3557/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3558/// when they refer to multiple MIOperands inside a single one.
3559bool ARMAsmParser::
3560cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3561             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3562  // Create a writeback register dummy placeholder.
3563  Inst.addOperand(MCOperand::CreateReg(0));
3564  // Rt, Rt2
3565  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3566  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3567  // addr
3568  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3569  // pred
3570  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3571  return true;
3572}
3573
3574/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3575/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3576/// when they refer to multiple MIOperands inside a single one.
3577bool ARMAsmParser::
3578cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3579                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3580  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3581
3582  // Create a writeback register dummy placeholder.
3583  Inst.addOperand(MCOperand::CreateImm(0));
3584
3585  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3586  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3587  return true;
3588}
3589
3590/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3591/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3592/// when they refer to multiple MIOperands inside a single one.
3593bool ARMAsmParser::
3594cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3595                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3596  // Create a writeback register dummy placeholder.
3597  Inst.addOperand(MCOperand::CreateImm(0));
3598  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3599  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3600  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3601  return true;
3602}
3603
3604/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3605/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3606/// when they refer to multiple MIOperands inside a single one.
3607bool ARMAsmParser::
3608cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3609                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3610  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3611
3612  // Create a writeback register dummy placeholder.
3613  Inst.addOperand(MCOperand::CreateImm(0));
3614
3615  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3616  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3617  return true;
3618}
3619
3620/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3621/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3622/// when they refer to multiple MIOperands inside a single one.
3623bool ARMAsmParser::
3624cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3625                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3626  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3627
3628  // Create a writeback register dummy placeholder.
3629  Inst.addOperand(MCOperand::CreateImm(0));
3630
3631  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3632  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3633  return true;
3634}
3635
3636
3637/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3638/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3639/// when they refer to multiple MIOperands inside a single one.
3640bool ARMAsmParser::
3641cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3642                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3643  // Create a writeback register dummy placeholder.
3644  Inst.addOperand(MCOperand::CreateImm(0));
3645  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3646  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3647  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3648  return true;
3649}
3650
3651/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3652/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3653/// when they refer to multiple MIOperands inside a single one.
3654bool ARMAsmParser::
3655cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3656                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3657  // Create a writeback register dummy placeholder.
3658  Inst.addOperand(MCOperand::CreateImm(0));
3659  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3660  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3661  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3662  return true;
3663}
3664
3665/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3666/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3667/// when they refer to multiple MIOperands inside a single one.
3668bool ARMAsmParser::
3669cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3670                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3671  // Create a writeback register dummy placeholder.
3672  Inst.addOperand(MCOperand::CreateImm(0));
3673  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3674  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3675  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3676  return true;
3677}
3678
3679/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3680/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3681/// when they refer to multiple MIOperands inside a single one.
3682bool ARMAsmParser::
3683cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3684                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3685  // Rt
3686  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3687  // Create a writeback register dummy placeholder.
3688  Inst.addOperand(MCOperand::CreateImm(0));
3689  // addr
3690  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3691  // offset
3692  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3693  // pred
3694  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3695  return true;
3696}
3697
3698/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3699/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3700/// when they refer to multiple MIOperands inside a single one.
3701bool ARMAsmParser::
3702cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3703                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3704  // Rt
3705  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3706  // Create a writeback register dummy placeholder.
3707  Inst.addOperand(MCOperand::CreateImm(0));
3708  // addr
3709  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3710  // offset
3711  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3712  // pred
3713  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3714  return true;
3715}
3716
3717/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3718/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3719/// when they refer to multiple MIOperands inside a single one.
3720bool ARMAsmParser::
3721cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3722                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3723  // Create a writeback register dummy placeholder.
3724  Inst.addOperand(MCOperand::CreateImm(0));
3725  // Rt
3726  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3727  // addr
3728  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3729  // offset
3730  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3731  // pred
3732  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3733  return true;
3734}
3735
3736/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3737/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3738/// when they refer to multiple MIOperands inside a single one.
3739bool ARMAsmParser::
3740cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3741                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3742  // Create a writeback register dummy placeholder.
3743  Inst.addOperand(MCOperand::CreateImm(0));
3744  // Rt
3745  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3746  // addr
3747  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3748  // offset
3749  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3750  // pred
3751  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3752  return true;
3753}
3754
3755/// cvtLdrdPre - Convert parsed operands to MCInst.
3756/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3757/// when they refer to multiple MIOperands inside a single one.
3758bool ARMAsmParser::
3759cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3760           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3761  // Rt, Rt2
3762  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3763  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3764  // Create a writeback register dummy placeholder.
3765  Inst.addOperand(MCOperand::CreateImm(0));
3766  // addr
3767  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3768  // pred
3769  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3770  return true;
3771}
3772
3773/// cvtStrdPre - Convert parsed operands to MCInst.
3774/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3775/// when they refer to multiple MIOperands inside a single one.
3776bool ARMAsmParser::
3777cvtStrdPre(MCInst &Inst, unsigned Opcode,
3778           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3779  // Create a writeback register dummy placeholder.
3780  Inst.addOperand(MCOperand::CreateImm(0));
3781  // Rt, Rt2
3782  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3783  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3784  // addr
3785  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3786  // pred
3787  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3788  return true;
3789}
3790
3791/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3792/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3793/// when they refer to multiple MIOperands inside a single one.
3794bool ARMAsmParser::
3795cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3796                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3797  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3798  // Create a writeback register dummy placeholder.
3799  Inst.addOperand(MCOperand::CreateImm(0));
3800  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3801  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3802  return true;
3803}
3804
3805/// cvtThumbMultiply - Convert parsed operands to MCInst.
3806/// Needed here because the Asm Gen Matcher can't handle properly tied operands
3807/// when they refer to multiple MIOperands inside a single one.
3808bool ARMAsmParser::
3809cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3810           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3811  // The second source operand must be the same register as the destination
3812  // operand.
3813  if (Operands.size() == 6 &&
3814      (((ARMOperand*)Operands[3])->getReg() !=
3815       ((ARMOperand*)Operands[5])->getReg()) &&
3816      (((ARMOperand*)Operands[3])->getReg() !=
3817       ((ARMOperand*)Operands[4])->getReg())) {
3818    Error(Operands[3]->getStartLoc(),
3819          "destination register must match source register");
3820    return false;
3821  }
3822  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3823  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3824  // If we have a three-operand form, make sure to set Rn to be the operand
3825  // that isn't the same as Rd.
3826  unsigned RegOp = 4;
3827  if (Operands.size() == 6 &&
3828      ((ARMOperand*)Operands[4])->getReg() ==
3829        ((ARMOperand*)Operands[3])->getReg())
3830    RegOp = 5;
3831  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3832  Inst.addOperand(Inst.getOperand(0));
3833  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3834
3835  return true;
3836}
3837
3838bool ARMAsmParser::
3839cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3840              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3841  // Vd
3842  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3843  // Create a writeback register dummy placeholder.
3844  Inst.addOperand(MCOperand::CreateImm(0));
3845  // Vn
3846  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3847  // pred
3848  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3849  return true;
3850}
3851
3852bool ARMAsmParser::
3853cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3854                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3855  // Vd
3856  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3857  // Create a writeback register dummy placeholder.
3858  Inst.addOperand(MCOperand::CreateImm(0));
3859  // Vn
3860  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3861  // Vm
3862  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3863  // pred
3864  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3865  return true;
3866}
3867
3868bool ARMAsmParser::
3869cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3870              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3871  // Create a writeback register dummy placeholder.
3872  Inst.addOperand(MCOperand::CreateImm(0));
3873  // Vn
3874  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3875  // Vt
3876  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3877  // pred
3878  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3879  return true;
3880}
3881
3882bool ARMAsmParser::
3883cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3884                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3885  // Create a writeback register dummy placeholder.
3886  Inst.addOperand(MCOperand::CreateImm(0));
3887  // Vn
3888  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3889  // Vm
3890  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3891  // Vt
3892  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3893  // pred
3894  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3895  return true;
3896}
3897
3898/// Parse an ARM memory expression. Return false if successful, or true if an
3899/// error was emitted.  The first token must be a '[' when called.
3900bool ARMAsmParser::
3901parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3902  SMLoc S, E;
3903  assert(Parser.getTok().is(AsmToken::LBrac) &&
3904         "Token is not a Left Bracket");
3905  S = Parser.getTok().getLoc();
3906  Parser.Lex(); // Eat left bracket token.
3907
3908  const AsmToken &BaseRegTok = Parser.getTok();
3909  int BaseRegNum = tryParseRegister();
3910  if (BaseRegNum == -1)
3911    return Error(BaseRegTok.getLoc(), "register expected");
3912
3913  // The next token must either be a comma or a closing bracket.
3914  const AsmToken &Tok = Parser.getTok();
3915  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3916    return Error(Tok.getLoc(), "malformed memory operand");
3917
3918  if (Tok.is(AsmToken::RBrac)) {
3919    E = Tok.getLoc();
3920    Parser.Lex(); // Eat right bracket token.
3921
3922    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3923                                             0, 0, false, S, E));
3924
3925    // If there's a pre-indexing writeback marker, '!', just add it as a token
3926    // operand. It's rather odd, but syntactically valid.
3927    if (Parser.getTok().is(AsmToken::Exclaim)) {
3928      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3929      Parser.Lex(); // Eat the '!'.
3930    }
3931
3932    return false;
3933  }
3934
3935  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
3936  Parser.Lex(); // Eat the comma.
3937
3938  // If we have a ':', it's an alignment specifier.
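  // For example, the ':128' in a NEON load such as
  // "vld1.64 {d0, d1}, [r0, :128]".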
3939  if (Parser.getTok().is(AsmToken::Colon)) {
3940    Parser.Lex(); // Eat the ':'.
3941    E = Parser.getTok().getLoc();
3942
3943    const MCExpr *Expr;
3944    if (getParser().ParseExpression(Expr))
3945     return true;
3946
3947    // The expression has to be a constant. Memory references with relocations
3948    // don't come through here, as they use the <label> forms of the relevant
3949    // instructions.
3950    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3951    if (!CE)
3952      return Error (E, "constant expression expected");
3953
3954    unsigned Align = 0;
3955    switch (CE->getValue()) {
3956    default:
3957      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
3958    case 64:  Align = 8; break;
3959    case 128: Align = 16; break;
3960    case 256: Align = 32; break;
3961    }
3962
3963    // Now we should have the closing ']'
3964    E = Parser.getTok().getLoc();
3965    if (Parser.getTok().isNot(AsmToken::RBrac))
3966      return Error(E, "']' expected");
3967    Parser.Lex(); // Eat right bracket token.
3968
3969    // Don't worry about range checking the value here. That's handled by
3970    // the is*() predicates.
3971    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
3972                                             ARM_AM::no_shift, 0, Align,
3973                                             false, S, E));
3974
3975    // If there's a pre-indexing writeback marker, '!', just add it as a token
3976    // operand.
3977    if (Parser.getTok().is(AsmToken::Exclaim)) {
3978      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3979      Parser.Lex(); // Eat the '!'.
3980    }
3981
3982    return false;
3983  }
3984
3985  // If we have a '#', it's an immediate offset, else assume it's a register
3986  // offset. Be friendly and also accept a plain integer (without a leading
3987  // hash) for gas compatibility.
3988  if (Parser.getTok().is(AsmToken::Hash) ||
3989      Parser.getTok().is(AsmToken::Dollar) ||
3990      Parser.getTok().is(AsmToken::Integer)) {
3991    if (Parser.getTok().isNot(AsmToken::Integer))
3992      Parser.Lex(); // Eat the '#'.
3993    E = Parser.getTok().getLoc();
3994
3995    bool isNegative = getParser().getTok().is(AsmToken::Minus);
3996    const MCExpr *Offset;
3997    if (getParser().ParseExpression(Offset))
3998     return true;
3999
4000    // The expression has to be a constant. Memory references with relocations
4001    // don't come through here, as they use the <label> forms of the relevant
4002    // instructions.
4003    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4004    if (!CE)
4005      return Error (E, "constant expression expected");
4006
4007    // If the constant was #-0, represent it as INT32_MIN.
4008    int32_t Val = CE->getValue();
4009    if (isNegative && Val == 0)
4010      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4011
4012    // Now we should have the closing ']'
4013    E = Parser.getTok().getLoc();
4014    if (Parser.getTok().isNot(AsmToken::RBrac))
4015      return Error(E, "']' expected");
4016    Parser.Lex(); // Eat right bracket token.
4017
4018    // Don't worry about range checking the value here. That's handled by
4019    // the is*() predicates.
4020    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4021                                             ARM_AM::no_shift, 0, 0,
4022                                             false, S, E));
4023
4024    // If there's a pre-indexing writeback marker, '!', just add it as a token
4025    // operand.
4026    if (Parser.getTok().is(AsmToken::Exclaim)) {
4027      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4028      Parser.Lex(); // Eat the '!'.
4029    }
4030
4031    return false;
4032  }
4033
4034  // The register offset is optionally preceded by a '+' or '-'
4035  bool isNegative = false;
4036  if (Parser.getTok().is(AsmToken::Minus)) {
4037    isNegative = true;
4038    Parser.Lex(); // Eat the '-'.
4039  } else if (Parser.getTok().is(AsmToken::Plus)) {
4040    // Nothing to do.
4041    Parser.Lex(); // Eat the '+'.
4042  }
4043
4044  E = Parser.getTok().getLoc();
4045  int OffsetRegNum = tryParseRegister();
4046  if (OffsetRegNum == -1)
4047    return Error(E, "register expected");
4048
4049  // If there's a shift operator, handle it.
4050  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4051  unsigned ShiftImm = 0;
4052  if (Parser.getTok().is(AsmToken::Comma)) {
4053    Parser.Lex(); // Eat the ','.
4054    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4055      return true;
4056  }
4057
4058  // Now we should have the closing ']'
4059  E = Parser.getTok().getLoc();
4060  if (Parser.getTok().isNot(AsmToken::RBrac))
4061    return Error(E, "']' expected");
4062  Parser.Lex(); // Eat right bracket token.
4063
4064  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4065                                           ShiftType, ShiftImm, 0, isNegative,
4066                                           S, E));
4067
4068  // If there's a pre-indexing writeback marker, '!', just add it as a token
4069  // operand.
4070  if (Parser.getTok().is(AsmToken::Exclaim)) {
4071    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4072    Parser.Lex(); // Eat the '!'.
4073  }
4074
4075  return false;
4076}
4077
4078/// parseMemRegOffsetShift - one of these two:
4079///   ( lsl | lsr | asr | ror ) , # shift_amount
4080///   rrx
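///   For example, the 'lsl #2' in "ldr r0, [r1, r2, lsl #2]".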
4081/// Returns true on error; returns false if a shift was successfully parsed.
4082bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4083                                          unsigned &Amount) {
4084  SMLoc Loc = Parser.getTok().getLoc();
4085  const AsmToken &Tok = Parser.getTok();
4086  if (Tok.isNot(AsmToken::Identifier))
4087    return true;
4088  StringRef ShiftName = Tok.getString();
4089  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4090      ShiftName == "asl" || ShiftName == "ASL")
4091    St = ARM_AM::lsl;
4092  else if (ShiftName == "lsr" || ShiftName == "LSR")
4093    St = ARM_AM::lsr;
4094  else if (ShiftName == "asr" || ShiftName == "ASR")
4095    St = ARM_AM::asr;
4096  else if (ShiftName == "ror" || ShiftName == "ROR")
4097    St = ARM_AM::ror;
4098  else if (ShiftName == "rrx" || ShiftName == "RRX")
4099    St = ARM_AM::rrx;
4100  else
4101    return Error(Loc, "illegal shift operator");
4102  Parser.Lex(); // Eat shift type token.
4103
4104  // rrx stands alone.
4105  Amount = 0;
4106  if (St != ARM_AM::rrx) {
4107    Loc = Parser.getTok().getLoc();
4108    // A '#' and a shift amount.
4109    const AsmToken &HashTok = Parser.getTok();
4110    if (HashTok.isNot(AsmToken::Hash) &&
4111        HashTok.isNot(AsmToken::Dollar))
4112      return Error(HashTok.getLoc(), "'#' expected");
4113    Parser.Lex(); // Eat hash token.
4114
4115    const MCExpr *Expr;
4116    if (getParser().ParseExpression(Expr))
4117      return true;
4118    // Range check the immediate.
4119    // lsl, ror: 0 <= imm <= 31
4120    // lsr, asr: 0 <= imm <= 32
4121    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4122    if (!CE)
4123      return Error(Loc, "shift amount must be an immediate");
4124    int64_t Imm = CE->getValue();
4125    if (Imm < 0 ||
4126        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4127        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4128      return Error(Loc, "immediate shift value out of range");
4129    Amount = Imm;
4130  }
4131
4132  return false;
4133}
4134
4135/// parseFPImm - A floating point immediate expression operand.
4136ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4137parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4138  SMLoc S = Parser.getTok().getLoc();
4139
4140  if (Parser.getTok().isNot(AsmToken::Hash) &&
4141      Parser.getTok().isNot(AsmToken::Dollar))
4142    return MatchOperand_NoMatch;
4143
4144  // Disambiguate the VMOV forms that can accept an FP immediate.
4145  // vmov.f32 <sreg>, #imm
4146  // vmov.f64 <dreg>, #imm
4147  // vmov.f32 <dreg>, #imm  @ vector f32x2
4148  // vmov.f32 <qreg>, #imm  @ vector f32x4
4149  //
4150  // There are also the NEON VMOV instructions which expect an
4151  // integer constant. Make sure we don't try to parse an FPImm
4152  // for these:
4153  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4154  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4155  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4156                           TyOp->getToken() != ".f64"))
4157    return MatchOperand_NoMatch;
4158
4159  Parser.Lex(); // Eat the '#'.
4160
4161  // Handle negation, as that still comes through as a separate token.
4162  bool isNegative = false;
4163  if (Parser.getTok().is(AsmToken::Minus)) {
4164    isNegative = true;
4165    Parser.Lex();
4166  }
4167  const AsmToken &Tok = Parser.getTok();
4168  if (Tok.is(AsmToken::Real)) {
4169    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4170    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4171    // If we had a '-' in front, toggle the sign bit.
4172    IntVal ^= (uint64_t)isNegative << 63;
4173    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4174    Parser.Lex(); // Eat the token.
4175    if (Val == -1) {
4176      TokError("floating point value out of range");
4177      return MatchOperand_ParseFail;
4178    }
4179    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4180    return MatchOperand_Success;
4181  }
4182  if (Tok.is(AsmToken::Integer)) {
4183    int64_t Val = Tok.getIntVal();
4184    Parser.Lex(); // Eat the token.
4185    if (Val > 255 || Val < 0) {
4186      TokError("encoded floating point value out of range");
4187      return MatchOperand_ParseFail;
4188    }
4189    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4190    return MatchOperand_Success;
4191  }
4192
4193  TokError("invalid floating point immediate");
4194  return MatchOperand_ParseFail;
4195}
4196/// Parse an ARM instruction operand.  For now this parses the operand regardless
4197/// of the mnemonic.
4198bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4199                                StringRef Mnemonic) {
4200  SMLoc S, E;
4201
4202  // Check if the current operand has a custom associated parser, if so, try to
4203  // custom parse the operand, or fallback to the general approach.
4204  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4205  if (ResTy == MatchOperand_Success)
4206    return false;
4207  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4208  // there was a match, but an error occurred, in which case, just return that
4209  // the operand parsing failed.
4210  if (ResTy == MatchOperand_ParseFail)
4211    return true;
4212
4213  switch (getLexer().getKind()) {
4214  default:
4215    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4216    return true;
4217  case AsmToken::Identifier: {
4218    // If this is VMRS, check for the apsr_nzcv operand.
4219    if (!tryParseRegisterWithWriteBack(Operands))
4220      return false;
4221    int Res = tryParseShiftRegister(Operands);
4222    if (Res == 0) // success
4223      return false;
4224    else if (Res == -1) // irrecoverable error
4225      return true;
4226    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4227      S = Parser.getTok().getLoc();
4228      Parser.Lex();
4229      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4230      return false;
4231    }
4232
4233    // Fall through for the Identifier case that is not a register or a
4234    // special name.
4235  }
4236  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4237  case AsmToken::Integer: // things like 1f and 2b as branch targets
4238  case AsmToken::String:  // quoted label names.
4239  case AsmToken::Dot: {   // . as a branch target
4240    // This was not a register so parse other operands that start with an
4241    // identifier (like labels) as expressions and create them as immediates.
4242    const MCExpr *IdVal;
4243    S = Parser.getTok().getLoc();
4244    if (getParser().ParseExpression(IdVal))
4245      return true;
4246    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4247    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4248    return false;
4249  }
4250  case AsmToken::LBrac:
4251    return parseMemory(Operands);
4252  case AsmToken::LCurly:
4253    return parseRegisterList(Operands);
4254  case AsmToken::Dollar:
4255  case AsmToken::Hash: {
4256    // #42 -> immediate.
4257    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4258    S = Parser.getTok().getLoc();
4259    Parser.Lex();
4260    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4261    const MCExpr *ImmVal;
4262    if (getParser().ParseExpression(ImmVal))
4263      return true;
4264    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4265    if (CE) {
4266      int32_t Val = CE->getValue();
4267      if (isNegative && Val == 0)
4268        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4269    }
4270    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4271    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4272    return false;
4273  }
4274  case AsmToken::Colon: {
4275    // ":lower16:" and ":upper16:" expression prefixes
4276    // FIXME: Check it's an expression prefix,
4277    // e.g. (FOO - :lower16:BAR) isn't legal.
4278    ARMMCExpr::VariantKind RefKind;
4279    if (parsePrefix(RefKind))
4280      return true;
4281
4282    const MCExpr *SubExprVal;
4283    if (getParser().ParseExpression(SubExprVal))
4284      return true;
4285
4286    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4287                                                   getContext());
4288    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4289    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4290    return false;
4291  }
4292  }
4293}
4294
4295// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4296//  :lower16: and :upper16:.
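//  For example, the ':lower16:' in "movw r0, :lower16:sym", where 'sym' stands
//  for an arbitrary symbol.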
4297bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4298  RefKind = ARMMCExpr::VK_ARM_None;
4299
4300  // :lower16: and :upper16: modifiers
4301  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4302  Parser.Lex(); // Eat ':'
4303
4304  if (getLexer().isNot(AsmToken::Identifier)) {
4305    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4306    return true;
4307  }
4308
4309  StringRef IDVal = Parser.getTok().getIdentifier();
4310  if (IDVal == "lower16") {
4311    RefKind = ARMMCExpr::VK_ARM_LO16;
4312  } else if (IDVal == "upper16") {
4313    RefKind = ARMMCExpr::VK_ARM_HI16;
4314  } else {
4315    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4316    return true;
4317  }
4318  Parser.Lex();
4319
4320  if (getLexer().isNot(AsmToken::Colon)) {
4321    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4322    return true;
4323  }
4324  Parser.Lex(); // Eat the last ':'
4325  return false;
4326}
4327
4328/// \brief Given a mnemonic, split out possible predication code and carry
4329/// setting letters to form a canonical mnemonic and flags.
4330//
4331// FIXME: Would be nice to autogen this.
4332// FIXME: This is a bit of a maze of special cases.
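// For example, 'addseq' splits into the canonical mnemonic 'add' with the EQ
// predication code and carry-setting enabled.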
4333StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4334                                      unsigned &PredicationCode,
4335                                      bool &CarrySetting,
4336                                      unsigned &ProcessorIMod,
4337                                      StringRef &ITMask) {
4338  PredicationCode = ARMCC::AL;
4339  CarrySetting = false;
4340  ProcessorIMod = 0;
4341
4342  // Ignore some mnemonics we know aren't predicated forms.
4343  //
4344  // FIXME: Would be nice to autogen this.
4345  if ((Mnemonic == "movs" && isThumb()) ||
4346      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4347      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4348      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4349      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4350      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4351      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4352      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
4353    return Mnemonic;
4354
4355  // First, split out any predication code. Ignore mnemonics we know aren't
4356  // predicated but do have a flag-setting 's' and so weren't caught above.
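  // For example, 'subeq' is split into 'sub' with PredicationCode == ARMCC::EQ,
  // while 'bics' is left alone so its trailing 'cs' is not misread as the CS
  // condition code.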
4357  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4358      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4359      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4360      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4361    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4362      .Case("eq", ARMCC::EQ)
4363      .Case("ne", ARMCC::NE)
4364      .Case("hs", ARMCC::HS)
4365      .Case("cs", ARMCC::HS)
4366      .Case("lo", ARMCC::LO)
4367      .Case("cc", ARMCC::LO)
4368      .Case("mi", ARMCC::MI)
4369      .Case("pl", ARMCC::PL)
4370      .Case("vs", ARMCC::VS)
4371      .Case("vc", ARMCC::VC)
4372      .Case("hi", ARMCC::HI)
4373      .Case("ls", ARMCC::LS)
4374      .Case("ge", ARMCC::GE)
4375      .Case("lt", ARMCC::LT)
4376      .Case("gt", ARMCC::GT)
4377      .Case("le", ARMCC::LE)
4378      .Case("al", ARMCC::AL)
4379      .Default(~0U);
4380    if (CC != ~0U) {
4381      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4382      PredicationCode = CC;
4383    }
4384  }
4385
4386  // Next, determine if we have a carry setting bit. We explicitly ignore all
4387  // the instructions we know end in 's'.
4388  if (Mnemonic.endswith("s") &&
4389      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4390        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4391        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4392        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4393        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4394        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4395        Mnemonic == "fsts" ||
4396        (Mnemonic == "movs" && isThumb()))) {
4397    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4398    CarrySetting = true;
4399  }
4400
4401  // The "cps" instruction can have an interrupt mode operand glued onto
4402  // the end of the mnemonic. If so, split it out and parse the imod operand.
4403  if (Mnemonic.startswith("cps")) {
4404    // Split out any imod code.
4405    unsigned IMod =
4406      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4407      .Case("ie", ARM_PROC::IE)
4408      .Case("id", ARM_PROC::ID)
4409      .Default(~0U);
4410    if (IMod != ~0U) {
4411      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4412      ProcessorIMod = IMod;
4413    }
4414  }
4415
4416  // The "it" instruction has the condition mask on the end of the mnemonic.
4417  if (Mnemonic.startswith("it")) {
4418    ITMask = Mnemonic.slice(2, Mnemonic.size());
4419    Mnemonic = Mnemonic.slice(0, 2);
4420  }
4421
4422  return Mnemonic;
4423}
4424
4425/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4426/// inclusion of carry set or predication code operands.
4427//
4428// FIXME: It would be nice to autogen this.
4429void ARMAsmParser::
4430getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4431                      bool &CanAcceptPredicationCode) {
4432  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4433      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4434      Mnemonic == "add" || Mnemonic == "adc" ||
4435      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4436      Mnemonic == "orr" || Mnemonic == "mvn" ||
4437      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4438      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4439      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4440                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4441                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4442    CanAcceptCarrySet = true;
4443  } else
4444    CanAcceptCarrySet = false;
4445
4446  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4447      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4448      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4449      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4450      Mnemonic == "dsb" || Mnemonic == "isb" ||
4451      (Mnemonic == "clrex" && !isThumb()) ||
4452      (Mnemonic == "nop" && isThumbOne()) ||
4453      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4454        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4455        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4456      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4457       !isThumb()) ||
4458      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4459    CanAcceptPredicationCode = false;
4460  } else
4461    CanAcceptPredicationCode = true;
4462
4463  if (isThumb()) {
4464    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4465        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4466      CanAcceptPredicationCode = false;
4467  }
4468}
4469
4470bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4471                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4472  // FIXME: This is all horribly hacky. We really need a better way to deal
4473  // with optional operands like this in the matcher table.
4474
4475  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4476  // another does not. Specifically, the MOVW instruction does not. So we
4477  // special case it here and remove the defaulted (non-setting) cc_out
4478  // operand if that's the instruction we're trying to match.
4479  //
4480  // We do this as post-processing of the explicit operands rather than just
4481  // conditionally adding the cc_out in the first place because we need
4482  // to check the type of the parsed immediate operand.
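  // For example, in ARM mode 'mov r0, #0x1234' uses an immediate that is not
  // an SO-immediate but is in the 0-65535 range, so only MOVW (no cc_out) can
  // encode it and the defaulted cc_out operand is dropped.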
4483  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4484      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4485      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4486      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4487    return true;
4488
4489  // Register-register 'add' for thumb does not have a cc_out operand
4490  // when there are only two register operands.
4491  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4492      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4493      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4494      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4495    return true;
4496  // Register-register 'add' for thumb does not have a cc_out operand
4497  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4498  // have to check the immediate range here since Thumb2 has a variant
4499  // that can handle a different range and has a cc_out operand.
4500  if (((isThumb() && Mnemonic == "add") ||
4501       (isThumbTwo() && Mnemonic == "sub")) &&
4502      Operands.size() == 6 &&
4503      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4504      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4505      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4506      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4507      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4508       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4509    return true;
4510  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4511  // imm0_4095 variant. That's the least-preferred variant when
4512  // selecting via the generic "add" mnemonic, so to know that we
4513  // should remove the cc_out operand, we have to explicitly check that
4514  // it's not one of the other variants. Ugh.
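  // For example, outside an IT block 'add r0, r1, #0xabc' is not a Thumb2
  // modified immediate, so only the T4 'addw' encoding (no cc_out) can match.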
4515  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4516      Operands.size() == 6 &&
4517      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4518      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4519      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4520    // Nest conditions rather than one big 'if' statement for readability.
4521    //
4522    // If either register is a high reg, it's either one of the SP
4523    // variants (handled above) or a 32-bit encoding, so we just
4524    // check against T3.
4525    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4526         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4527        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4528      return false;
4529    // If both registers are low, we're in an IT block, and the immediate is
4530    // in range, we should use encoding T1 instead, which has a cc_out.
4531    if (inITBlock() &&
4532        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4533        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4534        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4535      return false;
4536
4537    // Otherwise, we use encoding T4, which does not have a cc_out
4538    // operand.
4539    return true;
4540  }
4541
4542  // The thumb2 multiply instruction doesn't have a CCOut register, so
4543  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4544  // use the 16-bit encoding or not.
4545  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4546      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4547      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4548      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4549      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4550      // If the registers aren't low regs, the destination reg isn't the
4551      // same as one of the source regs, or the cc_out operand is zero
4552      // outside of an IT block, we have to use the 32-bit encoding, so
4553      // remove the cc_out operand.
4554      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4555       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4556       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4557       !inITBlock() ||
4558       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4559        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4560        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4561        static_cast<ARMOperand*>(Operands[4])->getReg())))
4562    return true;
4563
4564  // Also check the 'mul' syntax variant that doesn't specify an explicit
4565  // destination register.
4566  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4567      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4568      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4569      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4570      // If the registers aren't low regs  or the cc_out operand is zero
4571      // outside of an IT block, we have to use the 32-bit encoding, so
4572      // remove the cc_out operand.
4573      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4574       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4575       !inITBlock()))
4576    return true;
4577
4580  // Register-register 'add/sub' for thumb does not have a cc_out operand
4581  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4582  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4583  // right, this will result in better diagnostics (which operand is off)
4584  // anyway.
4585  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4586      (Operands.size() == 5 || Operands.size() == 6) &&
4587      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4588      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4589      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4590    return true;
4591
4592  return false;
4593}
4594
4595static bool isDataTypeToken(StringRef Tok) {
4596  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4597    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4598    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4599    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4600    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4601    Tok == ".f" || Tok == ".d";
4602}
4603
4604// FIXME: This bit should probably be handled via an explicit match class
4605// in the .td files that matches the suffix instead of having it be
4606// a literal string token the way it is now.
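// For example, the '.32' in 'vldm.32 r1!, {d0-d3}' is simply ignored.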
4607static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4608  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4609}
4610
4611static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4612/// Parse an ARM instruction mnemonic followed by its operands.
4613bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4614                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4615  // Apply mnemonic aliases before doing anything else, as the destination
4616  // mnemonic may include suffixes and we want to handle them normally.
4617  // The generic tblgen'erated code does this later, at the start of
4618  // MatchInstructionImpl(), but that's too late for aliases that include
4619  // any sort of suffix.
4620  unsigned AvailableFeatures = getAvailableFeatures();
4621  applyMnemonicAliases(Name, AvailableFeatures);
4622
4623  // First check for the ARM-specific .req directive.
4624  if (Parser.getTok().is(AsmToken::Identifier) &&
4625      Parser.getTok().getIdentifier() == ".req") {
4626    parseDirectiveReq(Name, NameLoc);
4627    // We always return 'error' for this, as we're done with this
4628    // statement and don't need to match an instruction.
4629    return true;
4630  }
4631
4632  // Create the leading tokens for the mnemonic, split by '.' characters.
4633  size_t Start = 0, Next = Name.find('.');
4634  StringRef Mnemonic = Name.slice(Start, Next);
4635
4636  // Split out the predication code and carry setting flag from the mnemonic.
4637  unsigned PredicationCode;
4638  unsigned ProcessorIMod;
4639  bool CarrySetting;
4640  StringRef ITMask;
4641  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4642                           ProcessorIMod, ITMask);
4643
4644  // In Thumb1, only the branch (B) instruction can be predicated.
4645  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4646    Parser.EatToEndOfStatement();
4647    return Error(NameLoc, "conditional execution not supported in Thumb1");
4648  }
4649
4650  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4651
4652  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4653  // is the mask as it will be for the IT encoding if the conditional
4654  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4655  // where the conditional bit0 is zero, the instruction post-processing
4656  // will adjust the mask accordingly.
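  // For example, 'ittet' has ITMask == "tet" and produces Mask == 0b1011:
  // bits 3, 2 and 1 encode 't', 'e', 't' (relative to the assumed '1'
  // condition bit0), and bit 0 is the trailing '1' marking the block's end.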
4657  if (Mnemonic == "it") {
4658    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4659    if (ITMask.size() > 3) {
4660      Parser.EatToEndOfStatement();
4661      return Error(Loc, "too many conditions on IT instruction");
4662    }
4663    unsigned Mask = 8;
4664    for (unsigned i = ITMask.size(); i != 0; --i) {
4665      char pos = ITMask[i - 1];
4666      if (pos != 't' && pos != 'e') {
4667        Parser.EatToEndOfStatement();
4668        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4669      }
4670      Mask >>= 1;
4671      if (ITMask[i - 1] == 't')
4672        Mask |= 8;
4673    }
4674    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4675  }
4676
4677  // FIXME: This is all a pretty gross hack. We should automatically handle
4678  // optional operands like this via tblgen.
4679
4680  // Next, add the CCOut and ConditionCode operands, if needed.
4681  //
4682  // For mnemonics which can ever incorporate a carry setting bit or predication
4683  // code, our matching model involves us always generating CCOut and
4684  // ConditionCode operands to match the mnemonic "as written" and then we let
4685  // the matcher deal with finding the right instruction or generating an
4686  // appropriate error.
4687  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4688  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4689
4690  // If we had a carry-set on an instruction that can't do that, issue an
4691  // error.
4692  if (!CanAcceptCarrySet && CarrySetting) {
4693    Parser.EatToEndOfStatement();
4694    return Error(NameLoc, "instruction '" + Mnemonic +
4695                 "' can not set flags, but 's' suffix specified");
4696  }
4697  // If we had a predication code on an instruction that can't do that, issue an
4698  // error.
4699  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4700    Parser.EatToEndOfStatement();
4701    return Error(NameLoc, "instruction '" + Mnemonic +
4702                 "' is not predicable, but condition code specified");
4703  }
4704
4705  // Add the carry setting operand, if necessary.
4706  if (CanAcceptCarrySet) {
4707    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4708    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4709                                               Loc));
4710  }
4711
4712  // Add the predication code operand, if necessary.
4713  if (CanAcceptPredicationCode) {
4714    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4715                                      CarrySetting);
4716    Operands.push_back(ARMOperand::CreateCondCode(
4717                         ARMCC::CondCodes(PredicationCode), Loc));
4718  }
4719
4720  // Add the processor imod operand, if necessary.
4721  if (ProcessorIMod) {
4722    Operands.push_back(ARMOperand::CreateImm(
4723          MCConstantExpr::Create(ProcessorIMod, getContext()),
4724                                 NameLoc, NameLoc));
4725  }
4726
4727  // Add the remaining tokens in the mnemonic.
4728  while (Next != StringRef::npos) {
4729    Start = Next;
4730    Next = Name.find('.', Start + 1);
4731    StringRef ExtraToken = Name.slice(Start, Next);
4732
4733    // Some NEON instructions have an optional datatype suffix that is
4734    // completely ignored. Check for that.
4735    if (isDataTypeToken(ExtraToken) &&
4736        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4737      continue;
4738
4739    if (ExtraToken != ".n") {
4740      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4741      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4742    }
4743  }
4744
4745  // Read the remaining operands.
4746  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4747    // Read the first operand.
4748    if (parseOperand(Operands, Mnemonic)) {
4749      Parser.EatToEndOfStatement();
4750      return true;
4751    }
4752
4753    while (getLexer().is(AsmToken::Comma)) {
4754      Parser.Lex();  // Eat the comma.
4755
4756      // Parse and remember the operand.
4757      if (parseOperand(Operands, Mnemonic)) {
4758        Parser.EatToEndOfStatement();
4759        return true;
4760      }
4761    }
4762  }
4763
4764  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4765    SMLoc Loc = getLexer().getLoc();
4766    Parser.EatToEndOfStatement();
4767    return Error(Loc, "unexpected token in argument list");
4768  }
4769
4770  Parser.Lex(); // Consume the EndOfStatement
4771
4772  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4773  // do and don't have a cc_out optional-def operand. With some spot-checks
4774  // of the operand list, we can figure out which variant we're trying to
4775  // parse and adjust accordingly before actually matching. We shouldn't ever
4776  // try to remove a cc_out operand that was explicitly set on the
4777  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4778  // table-driven matcher doesn't fit well with the ARM instruction set.
4779  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4780    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4781    Operands.erase(Operands.begin() + 1);
4782    delete Op;
4783  }
4784
4785  // ARM mode 'blx' needs special handling, as the register operand version
4786  // is predicable, but the label operand version is not. So, we can't rely
4787  // on the Mnemonic based checking to correctly figure out when to put
4788  // a k_CondCode operand in the list. If we're trying to match the label
4789  // version, remove the k_CondCode operand here.
4790  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4791      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4792    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4793    Operands.erase(Operands.begin() + 1);
4794    delete Op;
4795  }
4796
4797  // The vector-compare-to-zero instructions have a literal token "#0" at
4798  // the end that reaches this point as an immediate operand. Convert it to a
4799  // token to play nicely with the matcher.
4800  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4801      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4802      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4803    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4804    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4805    if (CE && CE->getValue() == 0) {
4806      Operands.erase(Operands.begin() + 5);
4807      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4808      delete Op;
4809    }
4810  }
4811  // VCMP{E} does the same thing, but with a different operand count.
4812  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4813      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4814    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4815    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4816    if (CE && CE->getValue() == 0) {
4817      Operands.erase(Operands.begin() + 4);
4818      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4819      delete Op;
4820    }
4821  }
4822  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4823  // end. Convert it to a token here. Take care not to convert those
4824  // that should hit the Thumb2 encoding.
4825  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4826      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4827      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4828      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4829    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4830    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4831    if (CE && CE->getValue() == 0 &&
4832        (isThumbOne() ||
4833         // The cc_out operand matches the IT block.
4834         ((inITBlock() != CarrySetting) &&
4835         // Neither register operand is a high register.
4836         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4837          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4838      Operands.erase(Operands.begin() + 5);
4839      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4840      delete Op;
4841    }
4842  }
4843
4844  return false;
4845}
4846
4847// Validate context-sensitive operand constraints.
4848
4849// Return 'true' if the register list contains a register that is neither a
4850// low GPR nor the permitted HiReg; return 'false' otherwise. If Reg appears
4851// in the list, set 'containsReg' to true.
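// For example, for 'push {r0, r7, lr}' with HiReg == LR this returns false,
// while 'push {r0, r8}' returns true because r8 is neither a low register
// nor the permitted HiReg.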
4852static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4853                                 unsigned HiReg, bool &containsReg) {
4854  containsReg = false;
4855  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4856    unsigned OpReg = Inst.getOperand(i).getReg();
4857    if (OpReg == Reg)
4858      containsReg = true;
4859    // Anything other than a low register isn't legal here.
4860    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4861      return true;
4862  }
4863  return false;
4864}
4865
4866// Check if the specified register is in the register list of the inst,
4867// starting at the indicated operand number.
4868static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4869  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4870    unsigned OpReg = Inst.getOperand(i).getReg();
4871    if (OpReg == Reg)
4872      return true;
4873  }
4874  return false;
4875}
4876
4877// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4878// the ARMInsts array) instead. Getting that here requires awkward
4879// API changes, though. Better way?
4880namespace llvm {
4881extern const MCInstrDesc ARMInsts[];
4882}
4883static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4884  return ARMInsts[Opcode];
4885}
4886
4887// FIXME: We would really like to be able to tablegen'erate this.
4888bool ARMAsmParser::
4889validateInstruction(MCInst &Inst,
4890                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4891  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4892  SMLoc Loc = Operands[0]->getStartLoc();
4893  // Check the IT block state first.
4894  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4895  // being allowed in IT blocks, but not being predicable.  It just always
4896  // executes.
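  // For example, after 'it eq' a following 'addne r0, r1, r2' is rejected
  // below with "incorrect condition in IT block".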
4897  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
4898    unsigned bit = 1;
4899    if (ITState.FirstCond)
4900      ITState.FirstCond = false;
4901    else
4902      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
4903    // The instruction must be predicable.
4904    if (!MCID.isPredicable())
4905      return Error(Loc, "instructions in IT block must be predicable");
4906    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
4907    unsigned ITCond = bit ? ITState.Cond :
4908      ARMCC::getOppositeCondition(ITState.Cond);
4909    if (Cond != ITCond) {
4910      // Find the condition code Operand to get its SMLoc information.
4911      SMLoc CondLoc;
4912      for (unsigned i = 1; i < Operands.size(); ++i)
4913        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
4914          CondLoc = Operands[i]->getStartLoc();
4915      return Error(CondLoc, "incorrect condition in IT block; got '" +
4916                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
4917                   "', but expected '" +
4918                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
4919    }
4920  // Check for non-'al' condition codes outside of the IT block.
4921  } else if (isThumbTwo() && MCID.isPredicable() &&
4922             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
4923             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
4924             Inst.getOpcode() != ARM::t2B)
4925    return Error(Loc, "predicated instructions must be in IT block");
4926
4927  switch (Inst.getOpcode()) {
4928  case ARM::LDRD:
4929  case ARM::LDRD_PRE:
4930  case ARM::LDRD_POST:
4931  case ARM::LDREXD: {
4932    // Rt2 must be Rt + 1.
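    // For example, 'ldrd r0, r1, [r2]' is accepted, while 'ldrd r0, r2, [r3]'
    // is rejected because r2 is not r0 + 1.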
4933    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4934    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4935    if (Rt2 != Rt + 1)
4936      return Error(Operands[3]->getStartLoc(),
4937                   "destination operands must be sequential");
4938    return false;
4939  }
4940  case ARM::STRD: {
4941    // Rt2 must be Rt + 1.
4942    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4943    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4944    if (Rt2 != Rt + 1)
4945      return Error(Operands[3]->getStartLoc(),
4946                   "source operands must be sequential");
4947    return false;
4948  }
4949  case ARM::STRD_PRE:
4950  case ARM::STRD_POST:
4951  case ARM::STREXD: {
4952    // Rt2 must be Rt + 1.
4953    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4954    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
4955    if (Rt2 != Rt + 1)
4956      return Error(Operands[3]->getStartLoc(),
4957                   "source operands must be sequential");
4958    return false;
4959  }
4960  case ARM::SBFX:
4961  case ARM::UBFX: {
4962    // width must be in range [1, 32-lsb]
4963    unsigned lsb = Inst.getOperand(2).getImm();
4964    unsigned widthm1 = Inst.getOperand(3).getImm();
4965    if (widthm1 >= 32 - lsb)
4966      return Error(Operands[5]->getStartLoc(),
4967                   "bitfield width must be in range [1,32-lsb]");
4968    return false;
4969  }
4970  case ARM::tLDMIA: {
4971    // If we're parsing Thumb2, the .w variant is available and handles
4972    // most cases that are normally illegal for a Thumb1 LDM
4973    // instruction. We'll make the transformation in processInstruction()
4974    // if necessary.
4975    //
4976    // Thumb LDM instructions are writeback iff the base register is not
4977    // in the register list.
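    // For example, in Thumb1 'ldm r4!, {r0, r1}' is the required form since r4
    // is not in the list, whereas 'ldm r4!, {r0, r4}' is rejected because the
    // base register is in the list.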
4978    unsigned Rn = Inst.getOperand(0).getReg();
4979    bool hasWritebackToken =
4980      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
4981       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
4982    bool listContainsBase;
4983    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
4984      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
4985                   "registers must be in range r0-r7");
4986    // If we should have writeback, then there should be a '!' token.
4987    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
4988      return Error(Operands[2]->getStartLoc(),
4989                   "writeback operator '!' expected");
4990    // If we should not have writeback, there must not be a '!'. This is
4991    // true even for the 32-bit wide encodings.
4992    if (listContainsBase && hasWritebackToken)
4993      return Error(Operands[3]->getStartLoc(),
4994                   "writeback operator '!' not allowed when base register "
4995                   "in register list");
4996
4997    break;
4998  }
4999  case ARM::t2LDMIA_UPD: {
5000    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5001      return Error(Operands[4]->getStartLoc(),
5002                   "writeback operator '!' not allowed when base register "
5003                   "in register list");
5004    break;
5005  }
5006  // Like ldm/stm, push and pop have hi-reg handling versions in Thumb2,
5007  // so only issue a diagnostic for Thumb1. The instructions will be
5008  // switched to the t2 encodings in processInstruction() if necessary.
5009  case ARM::tPOP: {
5010    bool listContainsBase;
5011    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5012        !isThumbTwo())
5013      return Error(Operands[2]->getStartLoc(),
5014                   "registers must be in range r0-r7 or pc");
5015    break;
5016  }
5017  case ARM::tPUSH: {
5018    bool listContainsBase;
5019    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5020        !isThumbTwo())
5021      return Error(Operands[2]->getStartLoc(),
5022                   "registers must be in range r0-r7 or lr");
5023    break;
5024  }
5025  case ARM::tSTMIA_UPD: {
5026    bool listContainsBase;
5027    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5028      return Error(Operands[4]->getStartLoc(),
5029                   "registers must be in range r0-r7");
5030    break;
5031  }
5032  }
5033
5034  return false;
5035}
5036
5037static unsigned getRealVSTLNOpcode(unsigned Opc) {
5038  switch(Opc) {
5039  default: llvm_unreachable("unexpected opcode!");
5040  // VST1LN
5041  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
5042  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5043  case ARM::VST1LNdWB_fixed_Asm_U8:
5044    return ARM::VST1LNd8_UPD;
5045  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
5046  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
5047  case ARM::VST1LNdWB_fixed_Asm_U16:
5048    return ARM::VST1LNd16_UPD;
5049  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
5050  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5051  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
5052    return ARM::VST1LNd32_UPD;
5053  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
5054  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5055  case ARM::VST1LNdWB_register_Asm_U8:
5056    return ARM::VST1LNd8_UPD;
5057  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
5058  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
5059  case ARM::VST1LNdWB_register_Asm_U16:
5060    return ARM::VST1LNd16_UPD;
5061  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
5062  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5063  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
5064    return ARM::VST1LNd32_UPD;
5065  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
5066  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
5067  case ARM::VST1LNdAsm_U8:
5068    return ARM::VST1LNd8;
5069  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
5070  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5071  case ARM::VST1LNdAsm_U16:
5072    return ARM::VST1LNd16;
5073  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
5074  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
5075  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
5076    return ARM::VST1LNd32;
5077
5078  // VST2LN
5079  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
5080  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5081  case ARM::VST2LNdWB_fixed_Asm_U8:
5082    return ARM::VST2LNd8_UPD;
5083  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
5084  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
5085  case ARM::VST2LNdWB_fixed_Asm_U16:
5086    return ARM::VST2LNd16_UPD;
5087  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5088  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5089  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5090    return ARM::VST2LNd32_UPD;
5091  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
5092  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5093  case ARM::VST2LNdWB_register_Asm_U8:
5094    return ARM::VST2LNd8_UPD;
5095  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
5096  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
5097  case ARM::VST2LNdWB_register_Asm_U16:
5098    return ARM::VST2LNd16_UPD;
5099  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5100  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5101  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5102    return ARM::VST2LNd32_UPD;
5103  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
5104  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
5105  case ARM::VST2LNdAsm_U8:
5106    return ARM::VST2LNd8;
5107  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
5108  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5109  case ARM::VST2LNdAsm_U16:
5110    return ARM::VST2LNd16;
5111  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5112  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
5113  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
5114    return ARM::VST2LNd32;
5115  }
5116}
5117
5118static unsigned getRealVLDLNOpcode(unsigned Opc) {
5119  switch(Opc) {
5120  default: llvm_unreachable("unexpected opcode!");
5121  // VLD1LN
5122  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5123  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5124  case ARM::VLD1LNdWB_fixed_Asm_U8:
5125    return ARM::VLD1LNd8_UPD;
5126  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5127  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5128  case ARM::VLD1LNdWB_fixed_Asm_U16:
5129    return ARM::VLD1LNd16_UPD;
5130  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5131  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5132  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5133    return ARM::VLD1LNd32_UPD;
5134  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5135  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5136  case ARM::VLD1LNdWB_register_Asm_U8:
5137    return ARM::VLD1LNd8_UPD;
5138  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5139  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5140  case ARM::VLD1LNdWB_register_Asm_U16:
5141    return ARM::VLD1LNd16_UPD;
5142  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5143  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5144  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5145    return ARM::VLD1LNd32_UPD;
5146  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5147  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5148  case ARM::VLD1LNdAsm_U8:
5149    return ARM::VLD1LNd8;
5150  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5151  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5152  case ARM::VLD1LNdAsm_U16:
5153    return ARM::VLD1LNd16;
5154  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5155  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5156  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5157    return ARM::VLD1LNd32;
5158
5159  // VLD2LN
5160  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5161  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5162  case ARM::VLD2LNdWB_fixed_Asm_U8:
5163    return ARM::VLD2LNd8_UPD;
5164  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5165  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5166  case ARM::VLD2LNdWB_fixed_Asm_U16:
5167    return ARM::VLD2LNd16_UPD;
5168  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5169  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5170  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5171    return ARM::VLD2LNd32_UPD;
5172  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5173  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5174  case ARM::VLD2LNdWB_register_Asm_U8:
5175    return ARM::VLD2LNd8_UPD;
5176  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5177  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5178  case ARM::VLD2LNdWB_register_Asm_U16:
5179    return ARM::VLD2LNd16_UPD;
5180  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5181  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5182  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5183    return ARM::VLD2LNd32_UPD;
5184  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5185  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5186  case ARM::VLD2LNdAsm_U8:
5187    return ARM::VLD2LNd8;
5188  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5189  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5190  case ARM::VLD2LNdAsm_U16:
5191    return ARM::VLD2LNd16;
5192  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5193  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5194  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5195    return ARM::VLD2LNd32;
5196  }
5197}
5198
5199bool ARMAsmParser::
5200processInstruction(MCInst &Inst,
5201                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5202  switch (Inst.getOpcode()) {
5203  // Handle NEON VST complex aliases.
5204  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5205  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5206  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5207  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5208  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5209  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5210  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5211  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5212    MCInst TmpInst;
5213    // Shuffle the operands around so the lane index operand is in the
5214    // right place.
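    // The parsed pseudo-instruction carries (Vd, lane, Rn, alignment, Rm,
    // pred); the real VST1LN*_UPD opcode wants (Rn_wb, Rn, alignment, Rm, Vd,
    // lane, pred), so rebuild the operand list in that order.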
5215    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5216    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5217    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5218    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5219    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5220    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5221    TmpInst.addOperand(Inst.getOperand(1)); // lane
5222    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5223    TmpInst.addOperand(Inst.getOperand(6));
5224    Inst = TmpInst;
5225    return true;
5226  }
5227
5228  case ARM::VST2LNdWB_register_Asm_8: case ARM::VST2LNdWB_register_Asm_P8:
5229  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5230  case ARM::VST2LNdWB_register_Asm_U8: case ARM::VST2LNdWB_register_Asm_16:
5231  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5232  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5233  case ARM::VST2LNdWB_register_Asm_32: case ARM::VST2LNdWB_register_Asm_F:
5234  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5235  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32: {
5236    MCInst TmpInst;
5237    // Shuffle the operands around so the lane index operand is in the
5238    // right place.
5239    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5240    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5241    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5242    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5243    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5244    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5245    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5246    TmpInst.addOperand(Inst.getOperand(1)); // lane
5247    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5248    TmpInst.addOperand(Inst.getOperand(6));
5249    Inst = TmpInst;
5250    return true;
5251  }
5252  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5253  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5254  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5255  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5256  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5257  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5258  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5259  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5260    MCInst TmpInst;
5261    // Shuffle the operands around so the lane index operand is in the
5262    // right place.
5263    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5264    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5265    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5266    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5267    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5268    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5269    TmpInst.addOperand(Inst.getOperand(1)); // lane
5270    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5271    TmpInst.addOperand(Inst.getOperand(5));
5272    Inst = TmpInst;
5273    return true;
5274  }
5275
5276  case ARM::VST2LNdWB_fixed_Asm_8: case ARM::VST2LNdWB_fixed_Asm_P8:
5277  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5278  case ARM::VST2LNdWB_fixed_Asm_U8: case ARM::VST2LNdWB_fixed_Asm_16:
5279  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5280  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5281  case ARM::VST2LNdWB_fixed_Asm_32: case ARM::VST2LNdWB_fixed_Asm_F:
5282  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5283  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32: {
5284    MCInst TmpInst;
5285    // Shuffle the operands around so the lane index operand is in the
5286    // right place.
5287    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5288    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5289    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5290    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5291    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5292    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5293    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5294    TmpInst.addOperand(Inst.getOperand(1)); // lane
5295    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5296    TmpInst.addOperand(Inst.getOperand(5));
5297    Inst = TmpInst;
5298    return true;
5299  }
5300  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5301  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5302  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5303  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5304  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5305  case ARM::VST1LNdAsm_U32: {
5306    MCInst TmpInst;
5307    // Shuffle the operands around so the lane index operand is in the
5308    // right place.
5309    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5310    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5311    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5312    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5313    TmpInst.addOperand(Inst.getOperand(1)); // lane
5314    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5315    TmpInst.addOperand(Inst.getOperand(5));
5316    Inst = TmpInst;
5317    return true;
5318  }
5319
5320  case ARM::VST2LNdAsm_8: case ARM::VST2LNdAsm_P8: case ARM::VST2LNdAsm_I8:
5321  case ARM::VST2LNdAsm_S8: case ARM::VST2LNdAsm_U8: case ARM::VST2LNdAsm_16:
5322  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5323  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32: case ARM::VST2LNdAsm_F:
5324  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5325  case ARM::VST2LNdAsm_U32: {
5326    MCInst TmpInst;
5327    // Shuffle the operands around so the lane index operand is in the
5328    // right place.
5329    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5330    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5331    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5332    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5333    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5334    TmpInst.addOperand(Inst.getOperand(1)); // lane
5335    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5336    TmpInst.addOperand(Inst.getOperand(5));
5337    Inst = TmpInst;
5338    return true;
5339  }
5340  // Handle NEON VLD complex aliases.
5341  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5342  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5343  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5344  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5345  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5346  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5347  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5348  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5349    MCInst TmpInst;
5350    // Shuffle the operands around so the lane index operand is in the
5351    // right place.
5352    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5353    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5354    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5355    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5356    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5357    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5358    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5359    TmpInst.addOperand(Inst.getOperand(1)); // lane
5360    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5361    TmpInst.addOperand(Inst.getOperand(6));
5362    Inst = TmpInst;
5363    return true;
5364  }
5365
5366  case ARM::VLD2LNdWB_register_Asm_8: case ARM::VLD2LNdWB_register_Asm_P8:
5367  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5368  case ARM::VLD2LNdWB_register_Asm_U8: case ARM::VLD2LNdWB_register_Asm_16:
5369  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5370  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5371  case ARM::VLD2LNdWB_register_Asm_32: case ARM::VLD2LNdWB_register_Asm_F:
5372  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5373  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32: {
5374    MCInst TmpInst;
5375    // Shuffle the operands around so the lane index operand is in the
5376    // right place.
5377    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5378    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5379    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5380    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5381    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5382    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5383    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5384    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5385    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5386    TmpInst.addOperand(Inst.getOperand(1)); // lane
5387    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5388    TmpInst.addOperand(Inst.getOperand(6));
5389    Inst = TmpInst;
5390    return true;
5391  }
5392
5393  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5394  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5395  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5396  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5397  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5398  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5399  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5400  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5401    MCInst TmpInst;
5402    // Shuffle the operands around so the lane index operand is in the
5403    // right place.
5404    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5405    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5406    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5407    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5408    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5409    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5410    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5411    TmpInst.addOperand(Inst.getOperand(1)); // lane
5412    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5413    TmpInst.addOperand(Inst.getOperand(5));
5414    Inst = TmpInst;
5415    return true;
5416  }
5417
5418  case ARM::VLD2LNdWB_fixed_Asm_8: case ARM::VLD2LNdWB_fixed_Asm_P8:
5419  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5420  case ARM::VLD2LNdWB_fixed_Asm_U8: case ARM::VLD2LNdWB_fixed_Asm_16:
5421  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5422  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5423  case ARM::VLD2LNdWB_fixed_Asm_32: case ARM::VLD2LNdWB_fixed_Asm_F:
5424  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5425  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32: {
5426    MCInst TmpInst;
5427    // Shuffle the operands around so the lane index operand is in the
5428    // right place.
5429    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5430    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5431    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5432    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5433    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5434    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5435    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5436    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5437    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5438    TmpInst.addOperand(Inst.getOperand(1)); // lane
5439    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5440    TmpInst.addOperand(Inst.getOperand(5));
5441    Inst = TmpInst;
5442    return true;
5443  }
5444
5445  case ARM::VLD1LNdAsm_8: case ARM::VLD1LNdAsm_P8: case ARM::VLD1LNdAsm_I8:
5446  case ARM::VLD1LNdAsm_S8: case ARM::VLD1LNdAsm_U8: case ARM::VLD1LNdAsm_16:
5447  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5448  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32: case ARM::VLD1LNdAsm_F:
5449  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5450  case ARM::VLD1LNdAsm_U32: {
5451    MCInst TmpInst;
5452    // Shuffle the operands around so the lane index operand is in the
5453    // right place.
5454    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5455    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5456    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5457    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5458    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5459    TmpInst.addOperand(Inst.getOperand(1)); // lane
5460    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5461    TmpInst.addOperand(Inst.getOperand(5));
5462    Inst = TmpInst;
5463    return true;
5464  }
5465
5466  case ARM::VLD2LNdAsm_8: case ARM::VLD2LNdAsm_P8: case ARM::VLD2LNdAsm_I8:
5467  case ARM::VLD2LNdAsm_S8: case ARM::VLD2LNdAsm_U8: case ARM::VLD2LNdAsm_16:
5468  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5469  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32: case ARM::VLD2LNdAsm_F:
5470  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5471  case ARM::VLD2LNdAsm_U32: {
5472    MCInst TmpInst;
5473    // Shuffle the operands around so the lane index operand is in the
5474    // right place.
5475    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5476    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5477    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5478    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5479    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5480    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5481    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5482    TmpInst.addOperand(Inst.getOperand(1)); // lane
5483    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5484    TmpInst.addOperand(Inst.getOperand(5));
5485    Inst = TmpInst;
5486    return true;
5487  }
5488  // Handle the Thumb2 mode MOV complex aliases.
5489  case ARM::t2MOVsi:
5490  case ARM::t2MOVSsi: {
5491    // Which instruction to expand to depends on the CCOut operand and,
5492    // when the register operands are low registers, on whether we're in
5493    // an IT block.
5494    bool isNarrow = false;
5495    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5496        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5497        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5498      isNarrow = true;
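    // (Note: the 16-bit shift-immediate encodings implicitly set the flags
    // outside an IT block and leave them alone inside one, so the narrow
    // form is only usable when that matches the flag behaviour requested
    // by the mnemonic.)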
5499    MCInst TmpInst;
5500    unsigned newOpc;
5501    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5502    default: llvm_unreachable("unexpected opcode!");
5503    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5504    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5505    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5506    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5507    }
5508    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5509    if (Amount == 32) Amount = 0;
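    // (A shift amount of 32 is encoded as 0 by the ASR/LSR immediate forms,
    // hence the adjustment above.)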
5510    TmpInst.setOpcode(newOpc);
5511    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5512    if (isNarrow)
5513      TmpInst.addOperand(MCOperand::CreateReg(
5514          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5515    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5516    TmpInst.addOperand(MCOperand::CreateImm(Amount));
5517    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5518    TmpInst.addOperand(Inst.getOperand(4));
5519    if (!isNarrow)
5520      TmpInst.addOperand(MCOperand::CreateReg(
5521          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5522    Inst = TmpInst;
5523    return true;
5524  }
5525  // Handle the ARM mode MOV complex aliases.
5526  case ARM::ASRr:
5527  case ARM::LSRr:
5528  case ARM::LSLr:
5529  case ARM::RORr: {
5530    ARM_AM::ShiftOpc ShiftTy;
5531    switch(Inst.getOpcode()) {
5532    default: llvm_unreachable("unexpected opcode!");
5533    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5534    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5535    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5536    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5537    }
5538    // The shift amount comes from Rm, so the shifter-operand immediate is zero.
5539    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5540    MCInst TmpInst;
5541    TmpInst.setOpcode(ARM::MOVsr);
5542    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5543    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5544    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5545    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5546    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5547    TmpInst.addOperand(Inst.getOperand(4));
5548    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5549    Inst = TmpInst;
5550    return true;
5551  }
5552  case ARM::ASRi:
5553  case ARM::LSRi:
5554  case ARM::LSLi:
5555  case ARM::RORi: {
5556    ARM_AM::ShiftOpc ShiftTy;
5557    switch(Inst.getOpcode()) {
5558    default: llvm_unreachable("unexpected opcode!");
5559    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5560    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5561    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5562    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5563    }
5564    // A shift by zero is a plain MOVr, not a MOVsi.
5565    unsigned Amt = Inst.getOperand(2).getImm();
5566    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5567    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5568    MCInst TmpInst;
5569    TmpInst.setOpcode(Opc);
5570    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5571    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5572    if (Opc == ARM::MOVsi)
5573      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5574    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5575    TmpInst.addOperand(Inst.getOperand(4));
5576    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5577    Inst = TmpInst;
5578    return true;
5579  }
5580  case ARM::RRXi: {
5581    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5582    MCInst TmpInst;
5583    TmpInst.setOpcode(ARM::MOVsi);
5584    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5585    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5586    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5587    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5588    TmpInst.addOperand(Inst.getOperand(3));
5589    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5590    Inst = TmpInst;
5591    return true;
5592  }
5593  case ARM::t2LDMIA_UPD: {
5594    // If this is a load of a single register, then we should use
5595    // a post-indexed LDR instruction instead, per the ARM ARM.
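    // (With writeback, the MCInst operands are Rn_wb, Rn, the two predicate
    // operands and then the register list, so a count of five means exactly
    // one register in the list.)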
5596    if (Inst.getNumOperands() != 5)
5597      return false;
5598    MCInst TmpInst;
5599    TmpInst.setOpcode(ARM::t2LDR_POST);
5600    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5601    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5602    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5603    TmpInst.addOperand(MCOperand::CreateImm(4));
5604    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5605    TmpInst.addOperand(Inst.getOperand(3));
5606    Inst = TmpInst;
5607    return true;
5608  }
5609  case ARM::t2STMDB_UPD: {
5610    // If this is a store of a single register, then we should use
5611    // a pre-indexed STR instruction instead, per the ARM ARM.
5612    if (Inst.getNumOperands() != 5)
5613      return false;
5614    MCInst TmpInst;
5615    TmpInst.setOpcode(ARM::t2STR_PRE);
5616    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5617    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5618    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5619    TmpInst.addOperand(MCOperand::CreateImm(-4));
5620    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5621    TmpInst.addOperand(Inst.getOperand(3));
5622    Inst = TmpInst;
5623    return true;
5624  }
5625  case ARM::LDMIA_UPD:
5626    // If this is a load of a single register via a 'pop', then we should use
5627    // a post-indexed LDR instruction instead, per the ARM ARM.
5628    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5629        Inst.getNumOperands() == 5) {
5630      MCInst TmpInst;
5631      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5632      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5633      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5634      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5635      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5636      TmpInst.addOperand(MCOperand::CreateImm(4));
5637      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5638      TmpInst.addOperand(Inst.getOperand(3));
5639      Inst = TmpInst;
5640      return true;
5641    }
5642    break;
5643  case ARM::STMDB_UPD:
5644    // If this is a store of a single register via a 'push', then we should use
5645    // a pre-indexed STR instruction instead, per the ARM ARM.
5646    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5647        Inst.getNumOperands() == 5) {
5648      MCInst TmpInst;
5649      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5650      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5651      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5652      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5653      TmpInst.addOperand(MCOperand::CreateImm(-4));
5654      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5655      TmpInst.addOperand(Inst.getOperand(3));
5656      Inst = TmpInst;
5657    }
5658    break;
5659  case ARM::t2ADDri12:
5660    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5661    // mnemonic was used (not "addw"), encoding T3 is preferred.
5662    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5663        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5664      break;
5665    Inst.setOpcode(ARM::t2ADDri);
5666    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5667    break;
5668  case ARM::t2SUBri12:
5669    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5670    // mnemonic was used (not "subw"), encoding T3 is preferred.
5671    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5672        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5673      break;
5674    Inst.setOpcode(ARM::t2SUBri);
5675    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5676    break;
5677  case ARM::tADDi8:
5678    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5679    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5680    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5681    // to encoding T1 if <Rd> is omitted."
5682    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5683      Inst.setOpcode(ARM::tADDi3);
5684      return true;
5685    }
5686    break;
5687  case ARM::tSUBi8:
5688    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5689    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5690    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5691    // to encoding T1 if <Rd> is omitted."
5692    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5693      Inst.setOpcode(ARM::tSUBi3);
5694      return true;
5695    }
5696    break;
5697  case ARM::t2ADDrr: {
5698    // If the destination and first source operand are the same, and
5699    // there's no setting of the flags, use encoding T2 instead of T3.
5700    // Note that this is only for ADD, not SUB. This mirrors the system
5701    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
5702    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5703        Inst.getOperand(5).getReg() != 0 ||
5704        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5705         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5706      break;
5707    MCInst TmpInst;
5708    TmpInst.setOpcode(ARM::tADDhirr);
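    // tADDhirr's destination is tied to its first source, so the same
    // register is added twice.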
5709    TmpInst.addOperand(Inst.getOperand(0));
5710    TmpInst.addOperand(Inst.getOperand(0));
5711    TmpInst.addOperand(Inst.getOperand(2));
5712    TmpInst.addOperand(Inst.getOperand(3));
5713    TmpInst.addOperand(Inst.getOperand(4));
5714    Inst = TmpInst;
5715    return true;
5716  }
5717  case ARM::tB:
5718    // A Thumb conditional branch outside of an IT block is a tBcc.
5719    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5720      Inst.setOpcode(ARM::tBcc);
5721      return true;
5722    }
5723    break;
5724  case ARM::t2B:
5725    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5726    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
5727      Inst.setOpcode(ARM::t2Bcc);
5728      return true;
5729    }
5730    break;
5731  case ARM::t2Bcc:
5732    // If the conditional is AL or we're in an IT block, we really want t2B.
5733    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5734      Inst.setOpcode(ARM::t2B);
5735      return true;
5736    }
5737    break;
5738  case ARM::tBcc:
5739    // If the conditional is AL, we really want tB.
5740    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5741      Inst.setOpcode(ARM::tB);
5742      return true;
5743    }
5744    break;
5745  case ARM::tLDMIA: {
5746    // If the register list contains any high registers, or if the writeback
5747    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
5748    // instead if we're in Thumb2. Otherwise, this should have generated
5749    // an error in validateInstruction().
5750    unsigned Rn = Inst.getOperand(0).getReg();
5751    bool hasWritebackToken =
5752      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5753       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5754    bool listContainsBase;
5755    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
5756        (!listContainsBase && !hasWritebackToken) ||
5757        (listContainsBase && hasWritebackToken)) {
5758      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5759      assert (isThumbTwo());
5760      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
5761      // If we're switching to the updating version, we need to insert
5762      // the writeback tied operand.
5763      if (hasWritebackToken)
5764        Inst.insert(Inst.begin(),
5765                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
5766      return true;
5767    }
5768    break;
5769  }
5770  case ARM::tSTMIA_UPD: {
5771    // If the register list contains any high registers, we need to use
5772    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5773    // should have generated an error in validateInstruction().
5774    unsigned Rn = Inst.getOperand(0).getReg();
5775    bool listContainsBase;
5776    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
5777      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5778      assert (isThumbTwo());
5779      Inst.setOpcode(ARM::t2STMIA_UPD);
5780      return true;
5781    }
5782    break;
5783  }
5784  case ARM::tPOP: {
5785    bool listContainsBase;
5786    // If the register list contains any high registers, we need to use
5787    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5788    // should have generated an error in validateInstruction().
5789    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
5790      return false;
5791    assert (isThumbTwo());
5792    Inst.setOpcode(ARM::t2LDMIA_UPD);
5793    // Add the base register and writeback operands.
5794    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5795    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5796    return true;
5797  }
5798  case ARM::tPUSH: {
5799    bool listContainsBase;
5800    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
5801      return false;
5802    assert (isThumbTwo());
5803    Inst.setOpcode(ARM::t2STMDB_UPD);
5804    // Add the base register and writeback operands.
5805    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5806    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5807    return true;
5808  }
5809  case ARM::t2MOVi: {
5810    // If we can use the 16-bit encoding and the user didn't explicitly
5811    // request the 32-bit variant, transform it here.
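    // (tMOVi8 sets the flags when used outside an IT block and leaves them
    // alone inside one, hence the pairing of the IT-block checks with the
    // cc_out operand below.)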
5812    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5813        Inst.getOperand(1).getImm() <= 255 &&
5814        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
5815         Inst.getOperand(4).getReg() == ARM::CPSR) ||
5816        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
5817        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5818         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5819      // The operands aren't in the same order for tMOVi8...
5820      MCInst TmpInst;
5821      TmpInst.setOpcode(ARM::tMOVi8);
5822      TmpInst.addOperand(Inst.getOperand(0));
5823      TmpInst.addOperand(Inst.getOperand(4));
5824      TmpInst.addOperand(Inst.getOperand(1));
5825      TmpInst.addOperand(Inst.getOperand(2));
5826      TmpInst.addOperand(Inst.getOperand(3));
5827      Inst = TmpInst;
5828      return true;
5829    }
5830    break;
5831  }
5832  case ARM::t2MOVr: {
5833    // If we can use the 16-bit encoding and the user didn't explicitly
5834    // request the 32-bit variant, transform it here.
5835    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5836        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5837        Inst.getOperand(2).getImm() == ARMCC::AL &&
5838        Inst.getOperand(4).getReg() == ARM::CPSR &&
5839        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5840         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5841      // The operands aren't the same for tMOV[S]r... (no cc_out)
5842      MCInst TmpInst;
5843      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
5844      TmpInst.addOperand(Inst.getOperand(0));
5845      TmpInst.addOperand(Inst.getOperand(1));
5846      TmpInst.addOperand(Inst.getOperand(2));
5847      TmpInst.addOperand(Inst.getOperand(3));
5848      Inst = TmpInst;
5849      return true;
5850    }
5851    break;
5852  }
5853  case ARM::t2SXTH:
5854  case ARM::t2SXTB:
5855  case ARM::t2UXTH:
5856  case ARM::t2UXTB: {
5857    // If we can use the 16-bit encoding and the user didn't explicitly
5858    // request the 32-bit variant, transform it here.
5859    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5860        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5861        Inst.getOperand(2).getImm() == 0 &&
5862        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5863         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5864      unsigned NewOpc;
5865      switch (Inst.getOpcode()) {
5866      default: llvm_unreachable("Illegal opcode!");
5867      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
5868      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
5869      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
5870      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
5871      }
5872      // The operands aren't the same for thumb1 (no rotate operand).
5873      MCInst TmpInst;
5874      TmpInst.setOpcode(NewOpc);
5875      TmpInst.addOperand(Inst.getOperand(0));
5876      TmpInst.addOperand(Inst.getOperand(1));
5877      TmpInst.addOperand(Inst.getOperand(3));
5878      TmpInst.addOperand(Inst.getOperand(4));
5879      Inst = TmpInst;
5880      return true;
5881    }
5882    break;
5883  }
5884  case ARM::t2IT: {
5885    // In the encoded mask, a bit equal to the low bit of the condition
5886    // code means 't' and the opposite value means 'e'. Our parsed mask
5887    // always uses 1 for 't', so XOR-toggle the bits above the terminating
5888    // 1 if the low bit of the condition code is zero. The encoding also
5889    // expects the low bit of the condition to appear as bit 4 of the mask
5890    // operand, so mask that in when needed.
5891    MCOperand &MO = Inst.getOperand(1);
5892    unsigned Mask = MO.getImm();
5893    unsigned OrigMask = Mask;
5894    unsigned TZ = CountTrailingZeros_32(Mask);
5895    if ((Inst.getOperand(0).getImm() & 1) == 0) {
5896      assert(Mask && TZ <= 3 && "illegal IT mask value!");
5897      for (unsigned i = 3; i != TZ; --i)
5898        Mask ^= 1 << i;
5899    } else
5900      Mask |= 0x10;
5901    MO.setImm(Mask);
5902
5903    // Set up the IT block state according to the IT instruction we just
5904    // matched.
5905    assert(!inITBlock() && "nested IT blocks?!");
5906    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
5907    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
5908    ITState.CurPosition = 0;
5909    ITState.FirstCond = true;
5910    break;
5911  }
5912  }
5913  return false;
5914}
5915
5916unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
5917  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
5918  // suffix depending on whether they're in an IT block or not.
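  // (E.g. outside an IT block only the flag-setting form 'adds r1, r2, #3'
  // can use the 16-bit encoding, while inside an IT block only 'add' can.)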
5919  unsigned Opc = Inst.getOpcode();
5920  const MCInstrDesc &MCID = getInstDesc(Opc);
5921  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
5922    assert(MCID.hasOptionalDef() &&
5923           "optionally flag setting instruction missing optional def operand");
5924    assert(MCID.NumOperands == Inst.getNumOperands() &&
5925           "operand count mismatch!");
5926    // Find the optional-def operand (cc_out).
5927    unsigned OpNo;
5928    for (OpNo = 0;
5929         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
5930         ++OpNo)
5931      ;
5932    // If we're parsing Thumb1, reject it completely.
5933    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
5934      return Match_MnemonicFail;
5935    // If we're parsing Thumb2, which form is legal depends on whether we're
5936    // in an IT block.
5937    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
5938        !inITBlock())
5939      return Match_RequiresITBlock;
5940    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
5941        inITBlock())
5942      return Match_RequiresNotITBlock;
5943  }
5944  // Some Thumb1 encodings that support high registers only allow both
5945  // registers to be low (r0-r7) when assembling for Thumb2.
5946  else if (Opc == ARM::tADDhirr && isThumbOne() &&
5947           isARMLowRegister(Inst.getOperand(1).getReg()) &&
5948           isARMLowRegister(Inst.getOperand(2).getReg()))
5949    return Match_RequiresThumb2;
5950  // Others only require ARMv6 or later.
5951  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
5952           isARMLowRegister(Inst.getOperand(0).getReg()) &&
5953           isARMLowRegister(Inst.getOperand(1).getReg()))
5954    return Match_RequiresV6;
5955  return Match_Success;
5956}
5957
5958bool ARMAsmParser::
5959MatchAndEmitInstruction(SMLoc IDLoc,
5960                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
5961                        MCStreamer &Out) {
5962  MCInst Inst;
5963  unsigned ErrorInfo;
5964  unsigned MatchResult;
5965  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
5966  switch (MatchResult) {
5967  default: break;
5968  case Match_Success:
5969    // Context sensitive operand constraints aren't handled by the matcher,
5970    // so check them here.
5971    if (validateInstruction(Inst, Operands)) {
5972      // Still advance the IT block position, otherwise one wrong condition
5973      // causes nasty cascading errors.
5974      forwardITPosition();
5975      return true;
5976    }
5977
5978    // Some instructions need post-processing to, for example, tweak which
5979    // encoding is selected. Loop on it while changes happen so the
5980    // individual transformations can chain off each other. E.g.,
5981    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
5982    while (processInstruction(Inst, Operands))
5983      ;
5984
5985    // Only move forward at the very end so that everything in validate
5986    // and process gets a consistent answer about whether we're in an IT
5987    // block.
5988    forwardITPosition();
5989
5990    Out.EmitInstruction(Inst);
5991    return false;
5992  case Match_MissingFeature:
5993    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
5994    return true;
5995  case Match_InvalidOperand: {
5996    SMLoc ErrorLoc = IDLoc;
5997    if (ErrorInfo != ~0U) {
5998      if (ErrorInfo >= Operands.size())
5999        return Error(IDLoc, "too few operands for instruction");
6000
6001      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
6002      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6003    }
6004
6005    return Error(ErrorLoc, "invalid operand for instruction");
6006  }
6007  case Match_MnemonicFail:
6008    return Error(IDLoc, "invalid instruction");
6009  case Match_ConversionFail:
6010    // The converter function will have already emitted a diagnostic.
6011    return true;
6012  case Match_RequiresNotITBlock:
6013    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6014  case Match_RequiresITBlock:
6015    return Error(IDLoc, "instruction only valid inside IT block");
6016  case Match_RequiresV6:
6017    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6018  case Match_RequiresThumb2:
6019    return Error(IDLoc, "instruction variant requires Thumb2");
6020  }
6021
6022  llvm_unreachable("Implement any new match types added!");
6023  return true;
6024}
6025
6026/// parseDirective parses the ARM-specific directives.
6027bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6028  StringRef IDVal = DirectiveID.getIdentifier();
6029  if (IDVal == ".word")
6030    return parseDirectiveWord(4, DirectiveID.getLoc());
6031  else if (IDVal == ".thumb")
6032    return parseDirectiveThumb(DirectiveID.getLoc());
6033  else if (IDVal == ".arm")
6034    return parseDirectiveARM(DirectiveID.getLoc());
6035  else if (IDVal == ".thumb_func")
6036    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6037  else if (IDVal == ".code")
6038    return parseDirectiveCode(DirectiveID.getLoc());
6039  else if (IDVal == ".syntax")
6040    return parseDirectiveSyntax(DirectiveID.getLoc());
6041  else if (IDVal == ".unreq")
6042    return parseDirectiveUnreq(DirectiveID.getLoc());
6043  return true;
6044}
6045
6046/// parseDirectiveWord
6047///  ::= .word [ expression (, expression)* ]
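///  e.g. ".word 0xdeadbeef, somelabel+4" (illustrative operands) emits two
///  4-byte values.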
6048bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6049  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6050    for (;;) {
6051      const MCExpr *Value;
6052      if (getParser().ParseExpression(Value))
6053        return true;
6054
6055      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6056
6057      if (getLexer().is(AsmToken::EndOfStatement))
6058        break;
6059
6060      // FIXME: Improve diagnostic.
6061      if (getLexer().isNot(AsmToken::Comma))
6062        return Error(L, "unexpected token in directive");
6063      Parser.Lex();
6064    }
6065  }
6066
6067  Parser.Lex();
6068  return false;
6069}
6070
6071/// parseDirectiveThumb
6072///  ::= .thumb
6073bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6074  if (getLexer().isNot(AsmToken::EndOfStatement))
6075    return Error(L, "unexpected token in directive");
6076  Parser.Lex();
6077
6078  if (!isThumb())
6079    SwitchMode();
6080  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6081  return false;
6082}
6083
6084/// parseDirectiveARM
6085///  ::= .arm
6086bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6087  if (getLexer().isNot(AsmToken::EndOfStatement))
6088    return Error(L, "unexpected token in directive");
6089  Parser.Lex();
6090
6091  if (isThumb())
6092    SwitchMode();
6093  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6094  return false;
6095}
6096
6097/// parseDirectiveThumbFunc
6098///  ::= .thumb_func symbol_name
6099bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6100  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6101  bool isMachO = MAI.hasSubsectionsViaSymbols();
6102  StringRef Name;
6103
6104  // Darwin asm has the function name after the .thumb_func directive;
6105  // ELF doesn't.
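  // (Illustrative: Darwin writes ".thumb_func _foo", while for ELF the
  // directive stands alone and the symbol comes from the following line.)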
6106  if (isMachO) {
6107    const AsmToken &Tok = Parser.getTok();
6108    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6109      return Error(L, "unexpected token in .thumb_func directive");
6110    Name = Tok.getIdentifier();
6111    Parser.Lex(); // Consume the identifier token.
6112  }
6113
6114  if (getLexer().isNot(AsmToken::EndOfStatement))
6115    return Error(L, "unexpected token in directive");
6116  Parser.Lex();
6117
6118  // FIXME: assuming function name will be the line following .thumb_func
6119  if (!isMachO) {
6120    Name = Parser.getTok().getIdentifier();
6121  }
6122
6123  // Mark symbol as a thumb symbol.
6124  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6125  getParser().getStreamer().EmitThumbFunc(Func);
6126  return false;
6127}
6128
6129/// parseDirectiveSyntax
6130///  ::= .syntax unified | divided
6131bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6132  const AsmToken &Tok = Parser.getTok();
6133  if (Tok.isNot(AsmToken::Identifier))
6134    return Error(L, "unexpected token in .syntax directive");
6135  StringRef Mode = Tok.getString();
6136  if (Mode == "unified" || Mode == "UNIFIED")
6137    Parser.Lex();
6138  else if (Mode == "divided" || Mode == "DIVIDED")
6139    return Error(L, "'.syntax divided' arm asssembly not supported");
6140  else
6141    return Error(L, "unrecognized syntax mode in .syntax directive");
6142
6143  if (getLexer().isNot(AsmToken::EndOfStatement))
6144    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6145  Parser.Lex();
6146
6147  // TODO tell the MC streamer the mode
6148  // getParser().getStreamer().Emit???();
6149  return false;
6150}
6151
6152/// parseDirectiveCode
6153///  ::= .code 16 | 32
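///  e.g. ".code 16" switches the assembler to Thumb mode, ".code 32" to ARM.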
6154bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6155  const AsmToken &Tok = Parser.getTok();
6156  if (Tok.isNot(AsmToken::Integer))
6157    return Error(L, "unexpected token in .code directive");
6158  int64_t Val = Parser.getTok().getIntVal();
6159  if (Val == 16)
6160    Parser.Lex();
6161  else if (Val == 32)
6162    Parser.Lex();
6163  else
6164    return Error(L, "invalid operand to .code directive");
6165
6166  if (getLexer().isNot(AsmToken::EndOfStatement))
6167    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6168  Parser.Lex();
6169
6170  if (Val == 16) {
6171    if (!isThumb())
6172      SwitchMode();
6173    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6174  } else {
6175    if (isThumb())
6176      SwitchMode();
6177    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6178  }
6179
6180  return false;
6181}
6182
6183/// parseDirectiveReq
6184///  ::= name .req registername
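///  e.g. "fooreg .req r4" makes "fooreg" (an illustrative name) an alias
///  for r4.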
6185bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6186  Parser.Lex(); // Eat the '.req' token.
6187  unsigned Reg;
6188  SMLoc SRegLoc, ERegLoc;
6189  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6190    Parser.EatToEndOfStatement();
6191    return Error(SRegLoc, "register name expected");
6192  }
6193
6194  // Shouldn't be anything else.
6195  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6196    Parser.EatToEndOfStatement();
6197    return Error(Parser.getTok().getLoc(),
6198                 "unexpected input in .req directive.");
6199  }
6200
6201  Parser.Lex(); // Consume the EndOfStatement
6202
6203  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6204    return Error(SRegLoc, "redefinition of '" + Name +
6205                          "' does not match original.");
6206
6207  return false;
6208}
6209
6210/// parseDirectiveUnreq
6211///  ::= .unreq registername
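///  e.g. ".unreq fooreg" removes an alias previously created with ".req".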
6212bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6213  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6214    Parser.EatToEndOfStatement();
6215    return Error(L, "unexpected input in .unreq directive.");
6216  }
6217  RegisterReqs.erase(Parser.getTok().getIdentifier());
6218  Parser.Lex(); // Eat the identifier.
6219  return false;
6220}
6221
6222extern "C" void LLVMInitializeARMAsmLexer();
6223
6224/// Force static initialization.
6225extern "C" void LLVMInitializeARMAsmParser() {
6226  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6227  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6228  LLVMInitializeARMAsmLexer();
6229}
6230
6231#define GET_REGISTER_MATCHER
6232#define GET_MATCHER_IMPLEMENTATION
6233#include "ARMGenAsmMatcher.inc"
6234