ARMAsmParser.cpp revision 2f196747f15240691bd4e622f7995edfedf90f61
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  // Map of register aliases registered via the .req directive.
49  StringMap<unsigned> RegisterReqs;
50
51  struct {
52    ARMCC::CondCodes Cond;    // Condition for IT block.
53    unsigned Mask:4;          // Condition mask for instructions.
54                              // Starting at first 1 (from lsb).
55                              //   '1'  condition as indicated in IT.
56                              //   '0'  inverse of condition (else).
57                              // Count of instructions in IT block is
58                              // 4 - trailingzeroes(mask)
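                              // For example, a mask of 0b1000 (just the
                              // terminating 1) describes a one-instruction
                              // block, 0b0100 a two-instruction block, and
                              // 0b0001 a four-instruction block.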
59
60    bool FirstCond;           // Explicit flag for when we're parsing the
61                              // first instruction in the IT block. It's
62                              // implied in the mask, so needs special
63                              // handling.
64
65    unsigned CurPosition;     // Current position in parsing of IT
66                              // block. In range [0,3]. Initialized
67                              // according to count of instructions in block.
68                              // ~0U if no active IT block.
69  } ITState;
70  bool inITBlock() { return ITState.CurPosition != ~0U;}
71  void forwardITPosition() {
72    if (!inITBlock()) return;
73    // Move to the next instruction in the IT block, if there is one. If not,
74    // mark the block as done.
75    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
76    if (++ITState.CurPosition == 5 - TZ)
77      ITState.CurPosition = ~0U; // Done with the IT block after this.
78  }
79
80
81  MCAsmParser &getParser() const { return Parser; }
82  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
83
84  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
85  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
86
87  int tryParseRegister();
88  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
89  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
90  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
91  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
92  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
93  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
94  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
95                              unsigned &ShiftAmount);
96  bool parseDirectiveWord(unsigned Size, SMLoc L);
97  bool parseDirectiveThumb(SMLoc L);
98  bool parseDirectiveARM(SMLoc L);
99  bool parseDirectiveThumbFunc(SMLoc L);
100  bool parseDirectiveCode(SMLoc L);
101  bool parseDirectiveSyntax(SMLoc L);
102  bool parseDirectiveReq(StringRef Name, SMLoc L);
103  bool parseDirectiveUnreq(SMLoc L);
104
105  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
106                          bool &CarrySetting, unsigned &ProcessorIMod,
107                          StringRef &ITMask);
108  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
109                             bool &CanAcceptPredicationCode);
110
111  bool isThumb() const {
112    // FIXME: Can tablegen auto-generate this?
113    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
114  }
115  bool isThumbOne() const {
116    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
117  }
118  bool isThumbTwo() const {
119    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
120  }
121  bool hasV6Ops() const {
122    return STI.getFeatureBits() & ARM::HasV6Ops;
123  }
124  bool hasV7Ops() const {
125    return STI.getFeatureBits() & ARM::HasV7Ops;
126  }
127  void SwitchMode() {
128    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
129    setAvailableFeatures(FB);
130  }
131  bool isMClass() const {
132    return STI.getFeatureBits() & ARM::FeatureMClass;
133  }
134
135  /// @name Auto-generated Match Functions
136  /// {
137
138#define GET_ASSEMBLER_HEADER
139#include "ARMGenAsmMatcher.inc"
140
141  /// }
142
143  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
144  OperandMatchResultTy parseCoprocNumOperand(
145    SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseCoprocRegOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseCoprocOptionOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parseMemBarrierOptOperand(
151    SmallVectorImpl<MCParsedAsmOperand*>&);
152  OperandMatchResultTy parseProcIFlagsOperand(
153    SmallVectorImpl<MCParsedAsmOperand*>&);
154  OperandMatchResultTy parseMSRMaskOperand(
155    SmallVectorImpl<MCParsedAsmOperand*>&);
156  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
157                                   StringRef Op, int Low, int High);
158  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
159    return parsePKHImm(O, "lsl", 0, 31);
160  }
161  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
162    return parsePKHImm(O, "asr", 1, 32);
163  }
164  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
166  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
168  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
169  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
170  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
171  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
172  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
173
174  // Asm Match Converter Methods
175  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
176                    const SmallVectorImpl<MCParsedAsmOperand*> &);
177  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
178                    const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
180                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
188                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
190                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
192                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
194                             const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
196                             const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
198                             const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
200                             const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
202                  const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
204                  const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
206                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
208                        const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
210                     const SmallVectorImpl<MCParsedAsmOperand*> &);
211  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
212                        const SmallVectorImpl<MCParsedAsmOperand*> &);
213  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
214                     const SmallVectorImpl<MCParsedAsmOperand*> &);
215  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
216                        const SmallVectorImpl<MCParsedAsmOperand*> &);
217
218  bool validateInstruction(MCInst &Inst,
219                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
220  bool processInstruction(MCInst &Inst,
221                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
222  bool shouldOmitCCOutOperand(StringRef Mnemonic,
223                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
224
225public:
226  enum ARMMatchResultTy {
227    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
228    Match_RequiresNotITBlock,
229    Match_RequiresV6,
230    Match_RequiresThumb2
231  };
232
233  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
234    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
235    MCAsmParserExtension::Initialize(_Parser);
236
237    // Initialize the set of available features.
238    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
239
240    // Not in an ITBlock to start with.
241    ITState.CurPosition = ~0U;
242  }
243
244  // Implementation of the MCTargetAsmParser interface:
245  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
246  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
247                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
248  bool ParseDirective(AsmToken DirectiveID);
249
250  unsigned checkTargetMatchPredicate(MCInst &Inst);
251
252  bool MatchAndEmitInstruction(SMLoc IDLoc,
253                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
254                               MCStreamer &Out);
255};
256} // end anonymous namespace
257
258namespace {
259
260/// ARMOperand - Instances of this class represent a parsed ARM machine
261/// instruction.
262class ARMOperand : public MCParsedAsmOperand {
263  enum KindTy {
264    k_CondCode,
265    k_CCOut,
266    k_ITCondMask,
267    k_CoprocNum,
268    k_CoprocReg,
269    k_CoprocOption,
270    k_Immediate,
271    k_FPImmediate,
272    k_MemBarrierOpt,
273    k_Memory,
274    k_PostIndexRegister,
275    k_MSRMask,
276    k_ProcIFlags,
277    k_VectorIndex,
278    k_Register,
279    k_RegisterList,
280    k_DPRRegisterList,
281    k_SPRRegisterList,
282    k_VectorList,
283    k_VectorListAllLanes,
284    k_VectorListIndexed,
285    k_ShiftedRegister,
286    k_ShiftedImmediate,
287    k_ShifterImmediate,
288    k_RotateImmediate,
289    k_BitfieldDescriptor,
290    k_Token
291  } Kind;
292
293  SMLoc StartLoc, EndLoc;
294  SmallVector<unsigned, 8> Registers;
295
296  union {
297    struct {
298      ARMCC::CondCodes Val;
299    } CC;
300
301    struct {
302      unsigned Val;
303    } Cop;
304
305    struct {
306      unsigned Val;
307    } CoprocOption;
308
309    struct {
310      unsigned Mask:4;
311    } ITMask;
312
313    struct {
314      ARM_MB::MemBOpt Val;
315    } MBOpt;
316
317    struct {
318      ARM_PROC::IFlags Val;
319    } IFlags;
320
321    struct {
322      unsigned Val;
323    } MMask;
324
325    struct {
326      const char *Data;
327      unsigned Length;
328    } Tok;
329
330    struct {
331      unsigned RegNum;
332    } Reg;
333
334    // A vector register list is a sequential list of 1 to 4 registers.
335    struct {
336      unsigned RegNum;
337      unsigned Count;
338      unsigned LaneIndex;
339      bool isDoubleSpaced;
340    } VectorList;
341
342    struct {
343      unsigned Val;
344    } VectorIndex;
345
346    struct {
347      const MCExpr *Val;
348    } Imm;
349
350    struct {
351      unsigned Val;       // encoded 8-bit representation
352    } FPImm;
353
354    /// Combined record for all forms of ARM address expressions.
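    /// For example, an operand written "[r0, r1, lsl #2]" would be held here
    /// with BaseRegNum = r0, OffsetRegNum = r1, ShiftType = lsl and
    /// ShiftImm = 2, with OffsetImm left null.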
355    struct {
356      unsigned BaseRegNum;
357      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
358      // was specified.
359      const MCConstantExpr *OffsetImm;  // Offset immediate value
360      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
361      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
362      unsigned ShiftImm;        // shift for OffsetReg.
363      unsigned Alignment;       // 0 = no alignment specified
364                                // n = alignment in bytes (2, 4, 8, 16, or 32)
365      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
366    } Memory;
367
368    struct {
369      unsigned RegNum;
370      bool isAdd;
371      ARM_AM::ShiftOpc ShiftTy;
372      unsigned ShiftImm;
373    } PostIdxReg;
374
375    struct {
376      bool isASR;
377      unsigned Imm;
378    } ShifterImm;
379    struct {
380      ARM_AM::ShiftOpc ShiftTy;
381      unsigned SrcReg;
382      unsigned ShiftReg;
383      unsigned ShiftImm;
384    } RegShiftedReg;
385    struct {
386      ARM_AM::ShiftOpc ShiftTy;
387      unsigned SrcReg;
388      unsigned ShiftImm;
389    } RegShiftedImm;
390    struct {
391      unsigned Imm;
392    } RotImm;
393    struct {
394      unsigned LSB;
395      unsigned Width;
396    } Bitfield;
397  };
398
399  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
400public:
401  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
402    Kind = o.Kind;
403    StartLoc = o.StartLoc;
404    EndLoc = o.EndLoc;
405    switch (Kind) {
406    case k_CondCode:
407      CC = o.CC;
408      break;
409    case k_ITCondMask:
410      ITMask = o.ITMask;
411      break;
412    case k_Token:
413      Tok = o.Tok;
414      break;
415    case k_CCOut:
416    case k_Register:
417      Reg = o.Reg;
418      break;
419    case k_RegisterList:
420    case k_DPRRegisterList:
421    case k_SPRRegisterList:
422      Registers = o.Registers;
423      break;
424    case k_VectorList:
425    case k_VectorListAllLanes:
426    case k_VectorListIndexed:
427      VectorList = o.VectorList;
428      break;
429    case k_CoprocNum:
430    case k_CoprocReg:
431      Cop = o.Cop;
432      break;
433    case k_CoprocOption:
434      CoprocOption = o.CoprocOption;
435      break;
436    case k_Immediate:
437      Imm = o.Imm;
438      break;
439    case k_FPImmediate:
440      FPImm = o.FPImm;
441      break;
442    case k_MemBarrierOpt:
443      MBOpt = o.MBOpt;
444      break;
445    case k_Memory:
446      Memory = o.Memory;
447      break;
448    case k_PostIndexRegister:
449      PostIdxReg = o.PostIdxReg;
450      break;
451    case k_MSRMask:
452      MMask = o.MMask;
453      break;
454    case k_ProcIFlags:
455      IFlags = o.IFlags;
456      break;
457    case k_ShifterImmediate:
458      ShifterImm = o.ShifterImm;
459      break;
460    case k_ShiftedRegister:
461      RegShiftedReg = o.RegShiftedReg;
462      break;
463    case k_ShiftedImmediate:
464      RegShiftedImm = o.RegShiftedImm;
465      break;
466    case k_RotateImmediate:
467      RotImm = o.RotImm;
468      break;
469    case k_BitfieldDescriptor:
470      Bitfield = o.Bitfield;
471      break;
472    case k_VectorIndex:
473      VectorIndex = o.VectorIndex;
474      break;
475    }
476  }
477
478  /// getStartLoc - Get the location of the first token of this operand.
479  SMLoc getStartLoc() const { return StartLoc; }
480  /// getEndLoc - Get the location of the last token of this operand.
481  SMLoc getEndLoc() const { return EndLoc; }
482
483  ARMCC::CondCodes getCondCode() const {
484    assert(Kind == k_CondCode && "Invalid access!");
485    return CC.Val;
486  }
487
488  unsigned getCoproc() const {
489    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
490    return Cop.Val;
491  }
492
493  StringRef getToken() const {
494    assert(Kind == k_Token && "Invalid access!");
495    return StringRef(Tok.Data, Tok.Length);
496  }
497
498  unsigned getReg() const {
499    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
500    return Reg.RegNum;
501  }
502
503  const SmallVectorImpl<unsigned> &getRegList() const {
504    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
505            Kind == k_SPRRegisterList) && "Invalid access!");
506    return Registers;
507  }
508
509  const MCExpr *getImm() const {
510    assert(Kind == k_Immediate && "Invalid access!");
511    return Imm.Val;
512  }
513
514  unsigned getFPImm() const {
515    assert(Kind == k_FPImmediate && "Invalid access!");
516    return FPImm.Val;
517  }
518
519  unsigned getVectorIndex() const {
520    assert(Kind == k_VectorIndex && "Invalid access!");
521    return VectorIndex.Val;
522  }
523
524  ARM_MB::MemBOpt getMemBarrierOpt() const {
525    assert(Kind == k_MemBarrierOpt && "Invalid access!");
526    return MBOpt.Val;
527  }
528
529  ARM_PROC::IFlags getProcIFlags() const {
530    assert(Kind == k_ProcIFlags && "Invalid access!");
531    return IFlags.Val;
532  }
533
534  unsigned getMSRMask() const {
535    assert(Kind == k_MSRMask && "Invalid access!");
536    return MMask.Val;
537  }
538
539  bool isCoprocNum() const { return Kind == k_CoprocNum; }
540  bool isCoprocReg() const { return Kind == k_CoprocReg; }
541  bool isCoprocOption() const { return Kind == k_CoprocOption; }
542  bool isCondCode() const { return Kind == k_CondCode; }
543  bool isCCOut() const { return Kind == k_CCOut; }
544  bool isITMask() const { return Kind == k_ITCondMask; }
545  bool isITCondCode() const { return Kind == k_CondCode; }
546  bool isImm() const { return Kind == k_Immediate; }
547  bool isFPImm() const { return Kind == k_FPImmediate; }
548  bool isImm8s4() const {
549    if (Kind != k_Immediate)
550      return false;
551    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
552    if (!CE) return false;
553    int64_t Value = CE->getValue();
554    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
555  }
556  bool isImm0_1020s4() const {
557    if (Kind != k_Immediate)
558      return false;
559    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
560    if (!CE) return false;
561    int64_t Value = CE->getValue();
562    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
563  }
564  bool isImm0_508s4() const {
565    if (Kind != k_Immediate)
566      return false;
567    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
568    if (!CE) return false;
569    int64_t Value = CE->getValue();
570    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
571  }
572  bool isImm0_255() const {
573    if (Kind != k_Immediate)
574      return false;
575    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
576    if (!CE) return false;
577    int64_t Value = CE->getValue();
578    return Value >= 0 && Value < 256;
579  }
580  bool isImm0_1() const {
581    if (Kind != k_Immediate)
582      return false;
583    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
584    if (!CE) return false;
585    int64_t Value = CE->getValue();
586    return Value >= 0 && Value < 2;
587  }
588  bool isImm0_3() const {
589    if (Kind != k_Immediate)
590      return false;
591    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
592    if (!CE) return false;
593    int64_t Value = CE->getValue();
594    return Value >= 0 && Value < 4;
595  }
596  bool isImm0_7() const {
597    if (Kind != k_Immediate)
598      return false;
599    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
600    if (!CE) return false;
601    int64_t Value = CE->getValue();
602    return Value >= 0 && Value < 8;
603  }
604  bool isImm0_15() const {
605    if (Kind != k_Immediate)
606      return false;
607    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
608    if (!CE) return false;
609    int64_t Value = CE->getValue();
610    return Value >= 0 && Value < 16;
611  }
612  bool isImm0_31() const {
613    if (Kind != k_Immediate)
614      return false;
615    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
616    if (!CE) return false;
617    int64_t Value = CE->getValue();
618    return Value >= 0 && Value < 32;
619  }
620  bool isImm0_63() const {
621    if (Kind != k_Immediate)
622      return false;
623    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
624    if (!CE) return false;
625    int64_t Value = CE->getValue();
626    return Value >= 0 && Value < 64;
627  }
628  bool isImm8() const {
629    if (Kind != k_Immediate)
630      return false;
631    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
632    if (!CE) return false;
633    int64_t Value = CE->getValue();
634    return Value == 8;
635  }
636  bool isImm16() const {
637    if (Kind != k_Immediate)
638      return false;
639    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
640    if (!CE) return false;
641    int64_t Value = CE->getValue();
642    return Value == 16;
643  }
644  bool isImm32() const {
645    if (Kind != k_Immediate)
646      return false;
647    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
648    if (!CE) return false;
649    int64_t Value = CE->getValue();
650    return Value == 32;
651  }
652  bool isShrImm8() const {
653    if (Kind != k_Immediate)
654      return false;
655    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
656    if (!CE) return false;
657    int64_t Value = CE->getValue();
658    return Value > 0 && Value <= 8;
659  }
660  bool isShrImm16() const {
661    if (Kind != k_Immediate)
662      return false;
663    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
664    if (!CE) return false;
665    int64_t Value = CE->getValue();
666    return Value > 0 && Value <= 16;
667  }
668  bool isShrImm32() const {
669    if (Kind != k_Immediate)
670      return false;
671    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
672    if (!CE) return false;
673    int64_t Value = CE->getValue();
674    return Value > 0 && Value <= 32;
675  }
676  bool isShrImm64() const {
677    if (Kind != k_Immediate)
678      return false;
679    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
680    if (!CE) return false;
681    int64_t Value = CE->getValue();
682    return Value > 0 && Value <= 64;
683  }
684  bool isImm1_7() const {
685    if (Kind != k_Immediate)
686      return false;
687    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
688    if (!CE) return false;
689    int64_t Value = CE->getValue();
690    return Value > 0 && Value < 8;
691  }
692  bool isImm1_15() const {
693    if (Kind != k_Immediate)
694      return false;
695    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
696    if (!CE) return false;
697    int64_t Value = CE->getValue();
698    return Value > 0 && Value < 16;
699  }
700  bool isImm1_31() const {
701    if (Kind != k_Immediate)
702      return false;
703    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
704    if (!CE) return false;
705    int64_t Value = CE->getValue();
706    return Value > 0 && Value < 32;
707  }
708  bool isImm1_16() const {
709    if (Kind != k_Immediate)
710      return false;
711    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
712    if (!CE) return false;
713    int64_t Value = CE->getValue();
714    return Value > 0 && Value < 17;
715  }
716  bool isImm1_32() const {
717    if (Kind != k_Immediate)
718      return false;
719    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
720    if (!CE) return false;
721    int64_t Value = CE->getValue();
722    return Value > 0 && Value < 33;
723  }
724  bool isImm0_32() const {
725    if (Kind != k_Immediate)
726      return false;
727    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
728    if (!CE) return false;
729    int64_t Value = CE->getValue();
730    return Value >= 0 && Value < 33;
731  }
732  bool isImm0_65535() const {
733    if (Kind != k_Immediate)
734      return false;
735    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
736    if (!CE) return false;
737    int64_t Value = CE->getValue();
738    return Value >= 0 && Value < 65536;
739  }
740  bool isImm0_65535Expr() const {
741    if (Kind != k_Immediate)
742      return false;
743    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
744    // If it's not a constant expression, it'll generate a fixup and be
745    // handled later.
746    if (!CE) return true;
747    int64_t Value = CE->getValue();
748    return Value >= 0 && Value < 65536;
749  }
750  bool isImm24bit() const {
751    if (Kind != k_Immediate)
752      return false;
753    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
754    if (!CE) return false;
755    int64_t Value = CE->getValue();
756    return Value >= 0 && Value <= 0xffffff;
757  }
758  bool isImmThumbSR() const {
759    if (Kind != k_Immediate)
760      return false;
761    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
762    if (!CE) return false;
763    int64_t Value = CE->getValue();
764    return Value > 0 && Value < 33;
765  }
766  bool isPKHLSLImm() const {
767    if (Kind != k_Immediate)
768      return false;
769    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
770    if (!CE) return false;
771    int64_t Value = CE->getValue();
772    return Value >= 0 && Value < 32;
773  }
774  bool isPKHASRImm() const {
775    if (Kind != k_Immediate)
776      return false;
777    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
778    if (!CE) return false;
779    int64_t Value = CE->getValue();
780    return Value > 0 && Value <= 32;
781  }
782  bool isARMSOImm() const {
783    if (Kind != k_Immediate)
784      return false;
785    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
786    if (!CE) return false;
787    int64_t Value = CE->getValue();
788    return ARM_AM::getSOImmVal(Value) != -1;
789  }
790  bool isARMSOImmNot() const {
791    if (Kind != k_Immediate)
792      return false;
793    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
794    if (!CE) return false;
795    int64_t Value = CE->getValue();
796    return ARM_AM::getSOImmVal(~Value) != -1;
797  }
798  bool isARMSOImmNeg() const {
799    if (Kind != k_Immediate)
800      return false;
801    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
802    if (!CE) return false;
803    int64_t Value = CE->getValue();
804    return ARM_AM::getSOImmVal(-Value) != -1;
805  }
806  bool isT2SOImm() const {
807    if (Kind != k_Immediate)
808      return false;
809    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
810    if (!CE) return false;
811    int64_t Value = CE->getValue();
812    return ARM_AM::getT2SOImmVal(Value) != -1;
813  }
814  bool isT2SOImmNot() const {
815    if (Kind != k_Immediate)
816      return false;
817    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
818    if (!CE) return false;
819    int64_t Value = CE->getValue();
820    return ARM_AM::getT2SOImmVal(~Value) != -1;
821  }
822  bool isT2SOImmNeg() const {
823    if (Kind != k_Immediate)
824      return false;
825    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
826    if (!CE) return false;
827    int64_t Value = CE->getValue();
828    return ARM_AM::getT2SOImmVal(-Value) != -1;
829  }
830  bool isSetEndImm() const {
831    if (Kind != k_Immediate)
832      return false;
833    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
834    if (!CE) return false;
835    int64_t Value = CE->getValue();
836    return Value == 1 || Value == 0;
837  }
838  bool isReg() const { return Kind == k_Register; }
839  bool isRegList() const { return Kind == k_RegisterList; }
840  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
841  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
842  bool isToken() const { return Kind == k_Token; }
843  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
844  bool isMemory() const { return Kind == k_Memory; }
845  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
846  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
847  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
848  bool isRotImm() const { return Kind == k_RotateImmediate; }
849  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
850  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
851  bool isPostIdxReg() const {
852    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
853  }
854  bool isMemNoOffset(bool alignOK = false) const {
855    if (!isMemory())
856      return false;
857    // No offset of any kind.
858    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
859     (alignOK || Memory.Alignment == 0);
860  }
861  bool isAlignedMemory() const {
862    return isMemNoOffset(true);
863  }
864  bool isAddrMode2() const {
865    if (!isMemory() || Memory.Alignment != 0) return false;
866    // Check for register offset.
867    if (Memory.OffsetRegNum) return true;
868    // Immediate offset in range [-4095, 4095].
869    if (!Memory.OffsetImm) return true;
870    int64_t Val = Memory.OffsetImm->getValue();
871    return Val > -4096 && Val < 4096;
872  }
873  bool isAM2OffsetImm() const {
874    if (Kind != k_Immediate)
875      return false;
876    // Immediate offset in range [-4095, 4095].
877    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
878    if (!CE) return false;
879    int64_t Val = CE->getValue();
880    return Val > -4096 && Val < 4096;
881  }
882  bool isAddrMode3() const {
883    // If we have an immediate that's not a constant, treat it as a label
884    // reference needing a fixup. If it is a constant, it's something else
885    // and we reject it.
886    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
887      return true;
888    if (!isMemory() || Memory.Alignment != 0) return false;
889    // No shifts are legal for AM3.
890    if (Memory.ShiftType != ARM_AM::no_shift) return false;
891    // Check for register offset.
892    if (Memory.OffsetRegNum) return true;
893    // Immediate offset in range [-255, 255].
894    if (!Memory.OffsetImm) return true;
895    int64_t Val = Memory.OffsetImm->getValue();
896    return Val > -256 && Val < 256;
897  }
898  bool isAM3Offset() const {
899    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
900      return false;
901    if (Kind == k_PostIndexRegister)
902      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
903    // Immediate offset in range [-255, 255].
904    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
905    if (!CE) return false;
906    int64_t Val = CE->getValue();
907    // Special case, #-0 is INT32_MIN.
908    return (Val > -256 && Val < 256) || Val == INT32_MIN;
909  }
910  bool isAddrMode5() const {
911    // If we have an immediate that's not a constant, treat it as a label
912    // reference needing a fixup. If it is a constant, it's something else
913    // and we reject it.
914    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
915      return true;
916    if (!isMemory() || Memory.Alignment != 0) return false;
917    // Check for register offset.
918    if (Memory.OffsetRegNum) return false;
919    // Immediate offset in range [-1020, 1020] and a multiple of 4.
920    if (!Memory.OffsetImm) return true;
921    int64_t Val = Memory.OffsetImm->getValue();
922    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
923      Val == INT32_MIN;
924  }
925  bool isMemTBB() const {
926    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
927        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
928      return false;
929    return true;
930  }
931  bool isMemTBH() const {
932    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
933        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
934        Memory.Alignment != 0)
935      return false;
936    return true;
937  }
938  bool isMemRegOffset() const {
939    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
940      return false;
941    return true;
942  }
943  bool isT2MemRegOffset() const {
944    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
945        Memory.Alignment != 0)
946      return false;
947    // Only lsl #{0, 1, 2, 3} allowed.
948    if (Memory.ShiftType == ARM_AM::no_shift)
949      return true;
950    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
951      return false;
952    return true;
953  }
954  bool isMemThumbRR() const {
955    // Thumb reg+reg addressing is simple. Just two registers, a base and
956    // an offset. No shifts, negations or any other complicating factors.
957    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
958        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
959      return false;
960    return isARMLowRegister(Memory.BaseRegNum) &&
961      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
962  }
963  bool isMemThumbRIs4() const {
964    if (!isMemory() || Memory.OffsetRegNum != 0 ||
965        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
966      return false;
967    // Immediate offset, multiple of 4 in range [0, 124].
968    if (!Memory.OffsetImm) return true;
969    int64_t Val = Memory.OffsetImm->getValue();
970    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
971  }
972  bool isMemThumbRIs2() const {
973    if (!isMemory() || Memory.OffsetRegNum != 0 ||
974        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
975      return false;
976    // Immediate offset, multiple of 2 in range [0, 62].
977    if (!Memory.OffsetImm) return true;
978    int64_t Val = Memory.OffsetImm->getValue();
979    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
980  }
981  bool isMemThumbRIs1() const {
982    if (!isMemory() || Memory.OffsetRegNum != 0 ||
983        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
984      return false;
985    // Immediate offset in range [0, 31].
986    if (!Memory.OffsetImm) return true;
987    int64_t Val = Memory.OffsetImm->getValue();
988    return Val >= 0 && Val <= 31;
989  }
990  bool isMemThumbSPI() const {
991    if (!isMemory() || Memory.OffsetRegNum != 0 ||
992        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
993      return false;
994    // Immediate offset, multiple of 4 in range [0, 1020].
995    if (!Memory.OffsetImm) return true;
996    int64_t Val = Memory.OffsetImm->getValue();
997    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
998  }
999  bool isMemImm8s4Offset() const {
1000    // If we have an immediate that's not a constant, treat it as a label
1001    // reference needing a fixup. If it is a constant, it's something else
1002    // and we reject it.
1003    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1004      return true;
1005    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1006      return false;
1007    // Immediate offset a multiple of 4 in range [-1020, 1020].
1008    if (!Memory.OffsetImm) return true;
1009    int64_t Val = Memory.OffsetImm->getValue();
1010    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
1011  }
1012  bool isMemImm0_1020s4Offset() const {
1013    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1014      return false;
1015    // Immediate offset a multiple of 4 in range [0, 1020].
1016    if (!Memory.OffsetImm) return true;
1017    int64_t Val = Memory.OffsetImm->getValue();
1018    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
1019  }
1020  bool isMemImm8Offset() const {
1021    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1022      return false;
1023    // Immediate offset in range [-255, 255].
1024    if (!Memory.OffsetImm) return true;
1025    int64_t Val = Memory.OffsetImm->getValue();
1026    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
1027  }
1028  bool isMemPosImm8Offset() const {
1029    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1030      return false;
1031    // Immediate offset in range [0, 255].
1032    if (!Memory.OffsetImm) return true;
1033    int64_t Val = Memory.OffsetImm->getValue();
1034    return Val >= 0 && Val < 256;
1035  }
1036  bool isMemNegImm8Offset() const {
1037    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1038      return false;
1039    // Immediate offset in range [-255, -1].
1040    if (!Memory.OffsetImm) return false;
1041    int64_t Val = Memory.OffsetImm->getValue();
1042    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
1043  }
1044  bool isMemUImm12Offset() const {
1045    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1046      return false;
1047    // Immediate offset in range [0, 4095].
1048    if (!Memory.OffsetImm) return true;
1049    int64_t Val = Memory.OffsetImm->getValue();
1050    return (Val >= 0 && Val < 4096);
1051  }
1052  bool isMemImm12Offset() const {
1053    // If we have an immediate that's not a constant, treat it as a label
1054    // reference needing a fixup. If it is a constant, it's something else
1055    // and we reject it.
1056    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1057      return true;
1058
1059    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1060      return false;
1061    // Immediate offset in range [-4095, 4095].
1062    if (!Memory.OffsetImm) return true;
1063    int64_t Val = Memory.OffsetImm->getValue();
1064    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1065  }
1066  bool isPostIdxImm8() const {
1067    if (Kind != k_Immediate)
1068      return false;
1069    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1070    if (!CE) return false;
1071    int64_t Val = CE->getValue();
1072    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1073  }
1074  bool isPostIdxImm8s4() const {
1075    if (Kind != k_Immediate)
1076      return false;
1077    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1078    if (!CE) return false;
1079    int64_t Val = CE->getValue();
1080    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1081      (Val == INT32_MIN);
1082  }
1083
1084  bool isMSRMask() const { return Kind == k_MSRMask; }
1085  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1086
1087  // NEON operands.
1088  bool isSingleSpacedVectorList() const {
1089    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
1090  }
1091  bool isDoubleSpacedVectorList() const {
1092    return Kind == k_VectorList && VectorList.isDoubleSpaced;
1093  }
1094  bool isVecListOneD() const {
1095    if (!isSingleSpacedVectorList()) return false;
1096    return VectorList.Count == 1;
1097  }
1098
1099  bool isVecListTwoD() const {
1100    if (!isSingleSpacedVectorList()) return false;
1101    return VectorList.Count == 2;
1102  }
1103
1104  bool isVecListThreeD() const {
1105    if (!isSingleSpacedVectorList()) return false;
1106    return VectorList.Count == 3;
1107  }
1108
1109  bool isVecListFourD() const {
1110    if (!isSingleSpacedVectorList()) return false;
1111    return VectorList.Count == 4;
1112  }
1113
1114  bool isVecListTwoQ() const {
1115    if (!isDoubleSpacedVectorList()) return false;
1116    return VectorList.Count == 2;
1117  }
1118
1119  bool isVecListOneDAllLanes() const {
1120    if (Kind != k_VectorListAllLanes) return false;
1121    return VectorList.Count == 1;
1122  }
1123
1124  bool isVecListTwoDAllLanes() const {
1125    if (Kind != k_VectorListAllLanes) return false;
1126    return VectorList.Count == 2;
1127  }
1128
1129  bool isVecListOneDByteIndexed() const {
1130    if (Kind != k_VectorListIndexed) return false;
1131    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1132  }
1133
1134  bool isVecListOneDHWordIndexed() const {
1135    if (Kind != k_VectorListIndexed) return false;
1136    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
1137  }
1138
1139  bool isVecListOneDWordIndexed() const {
1140    if (Kind != k_VectorListIndexed) return false;
1141    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
1142  }
1143
1144  bool isVecListTwoDByteIndexed() const {
1145    if (Kind != k_VectorListIndexed) return false;
1146    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
1147  }
1148
1149  bool isVecListTwoDHWordIndexed() const {
1150    if (Kind != k_VectorListIndexed) return false;
1151    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
1152  }
1153
1154  bool isVecListTwoDWordIndexed() const {
1155    if (Kind != k_VectorListIndexed) return false;
1156    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
1157  }
1158
1159  bool isVectorIndex8() const {
1160    if (Kind != k_VectorIndex) return false;
1161    return VectorIndex.Val < 8;
1162  }
1163  bool isVectorIndex16() const {
1164    if (Kind != k_VectorIndex) return false;
1165    return VectorIndex.Val < 4;
1166  }
1167  bool isVectorIndex32() const {
1168    if (Kind != k_VectorIndex) return false;
1169    return VectorIndex.Val < 2;
1170  }
1171
1172  bool isNEONi8splat() const {
1173    if (Kind != k_Immediate)
1174      return false;
1175    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1176    // Must be a constant.
1177    if (!CE) return false;
1178    int64_t Value = CE->getValue();
1179    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1180    // value.
1181    return Value >= 0 && Value < 256;
1182  }
1183
1184  bool isNEONi16splat() const {
1185    if (Kind != k_Immediate)
1186      return false;
1187    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1188    // Must be a constant.
1189    if (!CE) return false;
1190    int64_t Value = CE->getValue();
1191    // i16 value in the range [0,255] or [0x0100, 0xff00]
1192    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1193  }
1194
1195  bool isNEONi32splat() const {
1196    if (Kind != k_Immediate)
1197      return false;
1198    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1199    // Must be a constant.
1200    if (!CE) return false;
1201    int64_t Value = CE->getValue();
1202    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
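    // (e.g. 0xab, 0xab00, 0xab0000 and 0xab000000 all satisfy this check).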
1203    return (Value >= 0 && Value < 256) ||
1204      (Value >= 0x0100 && Value <= 0xff00) ||
1205      (Value >= 0x010000 && Value <= 0xff0000) ||
1206      (Value >= 0x01000000 && Value <= 0xff000000);
1207  }
1208
1209  bool isNEONi32vmov() const {
1210    if (Kind != k_Immediate)
1211      return false;
1212    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1213    // Must be a constant.
1214    if (!CE) return false;
1215    int64_t Value = CE->getValue();
1216    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1217    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1218    return (Value >= 0 && Value < 256) ||
1219      (Value >= 0x0100 && Value <= 0xff00) ||
1220      (Value >= 0x010000 && Value <= 0xff0000) ||
1221      (Value >= 0x01000000 && Value <= 0xff000000) ||
1222      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1223      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1224  }
1225
1226  bool isNEONi64splat() const {
1227    if (Kind != k_Immediate)
1228      return false;
1229    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1230    // Must be a constant.
1231    if (!CE) return false;
1232    uint64_t Value = CE->getValue();
1233    // i64 value with each byte being either 0 or 0xff.
1234    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1235      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1236    return true;
1237  }
1238
1239  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1240    // Add as immediates when possible.  Null MCExpr = 0.
1241    if (Expr == 0)
1242      Inst.addOperand(MCOperand::CreateImm(0));
1243    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1244      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1245    else
1246      Inst.addOperand(MCOperand::CreateExpr(Expr));
1247  }
1248
1249  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1250    assert(N == 2 && "Invalid number of operands!");
1251    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
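    // The predicate's flag register is CPSR for a conditional (non-AL) code
    // and register 0 for AL.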
1252    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1253    Inst.addOperand(MCOperand::CreateReg(RegNum));
1254  }
1255
1256  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1257    assert(N == 1 && "Invalid number of operands!");
1258    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1259  }
1260
1261  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1262    assert(N == 1 && "Invalid number of operands!");
1263    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1264  }
1265
1266  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1267    assert(N == 1 && "Invalid number of operands!");
1268    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1269  }
1270
1271  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1272    assert(N == 1 && "Invalid number of operands!");
1273    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1274  }
1275
1276  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1277    assert(N == 1 && "Invalid number of operands!");
1278    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1279  }
1280
1281  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1282    assert(N == 1 && "Invalid number of operands!");
1283    Inst.addOperand(MCOperand::CreateReg(getReg()));
1284  }
1285
1286  void addRegOperands(MCInst &Inst, unsigned N) const {
1287    assert(N == 1 && "Invalid number of operands!");
1288    Inst.addOperand(MCOperand::CreateReg(getReg()));
1289  }
1290
1291  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1292    assert(N == 3 && "Invalid number of operands!");
1293    assert(isRegShiftedReg() &&
1294           "addRegShiftedRegOperands() on non RegShiftedReg!");
1295    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1296    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1297    Inst.addOperand(MCOperand::CreateImm(
1298      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1299  }
1300
1301  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1302    assert(N == 2 && "Invalid number of operands!");
1303    assert(isRegShiftedImm() &&
1304           "addRegShiftedImmOperands() on non RegShiftedImm!");
1305    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1306    Inst.addOperand(MCOperand::CreateImm(
1307      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1308  }
1309
1310  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1311    assert(N == 1 && "Invalid number of operands!");
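    // Bit 5 selects ASR vs. LSL; e.g. an ASR shifter with Imm == 3 is
    // encoded below as (1 << 5) | 3 == 0x23.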
1312    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1313                                         ShifterImm.Imm));
1314  }
1315
1316  void addRegListOperands(MCInst &Inst, unsigned N) const {
1317    assert(N == 1 && "Invalid number of operands!");
1318    const SmallVectorImpl<unsigned> &RegList = getRegList();
1319    for (SmallVectorImpl<unsigned>::const_iterator
1320           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1321      Inst.addOperand(MCOperand::CreateReg(*I));
1322  }
1323
1324  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1325    addRegListOperands(Inst, N);
1326  }
1327
1328  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1329    addRegListOperands(Inst, N);
1330  }
1331
1332  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1333    assert(N == 1 && "Invalid number of operands!");
1334    // Encoded as val>>3. The printer handles display as 8, 16, 24.
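    // (e.g. a rotation of 16 is emitted here as 2).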
1335    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1336  }
1337
1338  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1339    assert(N == 1 && "Invalid number of operands!");
1340    // Munge the lsb/width into a bitfield mask.
1341    unsigned lsb = Bitfield.LSB;
1342    unsigned width = Bitfield.Width;
1343    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
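    // For example, lsb == 8 and width == 8 yield the mask 0xffff00ff
    // (bits [15:8] cleared).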
1344    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1345                      (32 - (lsb + width)));
1346    Inst.addOperand(MCOperand::CreateImm(Mask));
1347  }
1348
1349  void addImmOperands(MCInst &Inst, unsigned N) const {
1350    assert(N == 1 && "Invalid number of operands!");
1351    addExpr(Inst, getImm());
1352  }
1353
1354  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1355    assert(N == 1 && "Invalid number of operands!");
1356    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1357  }
1358
1359  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1360    assert(N == 1 && "Invalid number of operands!");
1361    // FIXME: We really want to scale the value here, but the LDRD/STRD
1362    // instructions don't encode operands that way yet.
1363    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1364    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1365  }
1366
1367  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1368    assert(N == 1 && "Invalid number of operands!");
1369    // The immediate is scaled by four in the encoding and is stored
1370    // in the MCInst as such. Lop off the low two bits here.
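    // (e.g. an immediate of #508 is stored as 127).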
1371    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1372    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1373  }
1374
1375  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1376    assert(N == 1 && "Invalid number of operands!");
1377    // The immediate is scaled by four in the encoding and is stored
1378    // in the MCInst as such. Lop off the low two bits here.
1379    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1380    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1381  }
1382
1383  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1384    assert(N == 1 && "Invalid number of operands!");
1385    // The constant encodes as the immediate-1, and we store in the instruction
1386    // the bits as encoded, so subtract off one here.
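    // (e.g. #16 is stored as 15).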
1387    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1388    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1389  }
1390
1391  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1392    assert(N == 1 && "Invalid number of operands!");
1393    // The constant encodes as the immediate-1, and we store in the instruction
1394    // the bits as encoded, so subtract off one here.
1395    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1396    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1397  }
1398
1399  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1400    assert(N == 1 && "Invalid number of operands!");
1401    // The constant encodes as the immediate, except for 32, which encodes as
1402    // zero.
1403    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1404    unsigned Imm = CE->getValue();
1405    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1406  }
1407
1408  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1409    assert(N == 1 && "Invalid number of operands!");
1410    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1411    // the instruction as well.
1412    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1413    int Val = CE->getValue();
1414    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1415  }
1416
1417  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1418    assert(N == 1 && "Invalid number of operands!");
1419    // The operand is actually a t2_so_imm, but we have its bitwise
1420    // negation in the assembly source, so twiddle it here.
1421    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1422    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1423  }
1424
1425  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1426    assert(N == 1 && "Invalid number of operands!");
1427    // The operand is actually a t2_so_imm, but we have its
1428    // negation in the assembly source, so twiddle it here.
1429    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1430    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1431  }
1432
1433  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1434    assert(N == 1 && "Invalid number of operands!");
1435    // The operand is actually a so_imm, but we have its bitwise
1436    // negation in the assembly source, so twiddle it here.
1437    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1438    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1439  }
1440
1441  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1442    assert(N == 1 && "Invalid number of operands!");
1443    // The operand is actually a so_imm, but we have its
1444    // negation in the assembly source, so twiddle it here.
1445    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1446    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1447  }
1448
1449  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1450    assert(N == 1 && "Invalid number of operands!");
1451    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1452  }
1453
1454  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1455    assert(N == 1 && "Invalid number of operands!");
1456    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1457  }
1458
1459  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1460    assert(N == 2 && "Invalid number of operands!");
1461    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1462    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1463  }
1464
1465  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1466    assert(N == 3 && "Invalid number of operands!");
1467    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1468    if (!Memory.OffsetRegNum) {
1469      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1470      // Special case for #-0
1471      if (Val == INT32_MIN) Val = 0;
1472      if (Val < 0) Val = -Val;
1473      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1474    } else {
1475      // For register offset, we encode the shift type and negation flag
1476      // here.
1477      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1478                              Memory.ShiftImm, Memory.ShiftType);
1479    }
1480    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1481    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1482    Inst.addOperand(MCOperand::CreateImm(Val));
1483  }
1484
1485  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1486    assert(N == 2 && "Invalid number of operands!");
1487    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1488    assert(CE && "non-constant AM2OffsetImm operand!");
1489    int32_t Val = CE->getValue();
1490    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1491    // Special case for #-0
1492    if (Val == INT32_MIN) Val = 0;
1493    if (Val < 0) Val = -Val;
1494    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1495    Inst.addOperand(MCOperand::CreateReg(0));
1496    Inst.addOperand(MCOperand::CreateImm(Val));
1497  }
1498
1499  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1500    assert(N == 3 && "Invalid number of operands!");
1501    // If we have an immediate that's not a constant, treat it as a label
1502    // reference needing a fixup. If it is a constant, it's something else
1503    // and we reject it.
1504    if (isImm()) {
1505      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1506      Inst.addOperand(MCOperand::CreateReg(0));
1507      Inst.addOperand(MCOperand::CreateImm(0));
1508      return;
1509    }
1510
1511    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1512    if (!Memory.OffsetRegNum) {
1513      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1514      // Special case for #-0
1515      if (Val == INT32_MIN) Val = 0;
1516      if (Val < 0) Val = -Val;
1517      Val = ARM_AM::getAM3Opc(AddSub, Val);
1518    } else {
1519      // For register offset, we encode the shift type and negation flag
1520      // here.
1521      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1522    }
1523    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1524    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1525    Inst.addOperand(MCOperand::CreateImm(Val));
1526  }
1527
1528  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1529    assert(N == 2 && "Invalid number of operands!");
1530    if (Kind == k_PostIndexRegister) {
1531      int32_t Val =
1532        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1533      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1534      Inst.addOperand(MCOperand::CreateImm(Val));
1535      return;
1536    }
1537
1538    // Constant offset.
1539    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1540    int32_t Val = CE->getValue();
1541    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1542    // Special case for #-0
1543    if (Val == INT32_MIN) Val = 0;
1544    if (Val < 0) Val = -Val;
1545    Val = ARM_AM::getAM3Opc(AddSub, Val);
1546    Inst.addOperand(MCOperand::CreateReg(0));
1547    Inst.addOperand(MCOperand::CreateImm(Val));
1548  }
1549
1550  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1551    assert(N == 2 && "Invalid number of operands!");
1552    // If we have an immediate that's not a constant, treat it as a label
1553    // reference needing a fixup. If it is a constant, it's something else
1554    // and we reject it.
1555    if (isImm()) {
1556      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1557      Inst.addOperand(MCOperand::CreateImm(0));
1558      return;
1559    }
1560
1561    // The lower two bits are always zero and as such are not encoded.
1562    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1563    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1564    // Special case for #-0
1565    if (Val == INT32_MIN) Val = 0;
1566    if (Val < 0) Val = -Val;
1567    Val = ARM_AM::getAM5Opc(AddSub, Val);
1568    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1569    Inst.addOperand(MCOperand::CreateImm(Val));
1570  }
1571
1572  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1573    assert(N == 2 && "Invalid number of operands!");
1574    // If we have an immediate that's not a constant, treat it as a label
1575    // reference needing a fixup. If it is a constant, it's something else
1576    // and we reject it.
1577    if (isImm()) {
1578      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1579      Inst.addOperand(MCOperand::CreateImm(0));
1580      return;
1581    }
1582
1583    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1584    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1585    Inst.addOperand(MCOperand::CreateImm(Val));
1586  }
1587
1588  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1589    assert(N == 2 && "Invalid number of operands!");
1590    // The lower two bits are always zero and as such are not encoded.
1591    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1592    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1593    Inst.addOperand(MCOperand::CreateImm(Val));
1594  }
1595
1596  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1597    assert(N == 2 && "Invalid number of operands!");
1598    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1599    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1600    Inst.addOperand(MCOperand::CreateImm(Val));
1601  }
1602
1603  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1604    addMemImm8OffsetOperands(Inst, N);
1605  }
1606
1607  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1608    addMemImm8OffsetOperands(Inst, N);
1609  }
1610
1611  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1612    assert(N == 2 && "Invalid number of operands!");
1613    // If this is an immediate, it's a label reference.
1614    if (Kind == k_Immediate) {
1615      addExpr(Inst, getImm());
1616      Inst.addOperand(MCOperand::CreateImm(0));
1617      return;
1618    }
1619
1620    // Otherwise, it's a normal memory reg+offset.
1621    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1622    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1623    Inst.addOperand(MCOperand::CreateImm(Val));
1624  }
1625
1626  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1627    assert(N == 2 && "Invalid number of operands!");
1628    // If this is an immediate, it's a label reference.
1629    if (Kind == k_Immediate) {
1630      addExpr(Inst, getImm());
1631      Inst.addOperand(MCOperand::CreateImm(0));
1632      return;
1633    }
1634
1635    // Otherwise, it's a normal memory reg+offset.
1636    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1637    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1638    Inst.addOperand(MCOperand::CreateImm(Val));
1639  }
1640
1641  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1642    assert(N == 2 && "Invalid number of operands!");
1643    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1644    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1645  }
1646
1647  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1648    assert(N == 2 && "Invalid number of operands!");
1649    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1650    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1651  }
1652
1653  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1654    assert(N == 3 && "Invalid number of operands!");
1655    unsigned Val =
1656      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1657                        Memory.ShiftImm, Memory.ShiftType);
1658    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1659    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1660    Inst.addOperand(MCOperand::CreateImm(Val));
1661  }
1662
1663  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1664    assert(N == 3 && "Invalid number of operands!");
1665    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1666    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1667    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1668  }
1669
1670  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1671    assert(N == 2 && "Invalid number of operands!");
1672    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1673    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1674  }
1675
1676  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1677    assert(N == 2 && "Invalid number of operands!");
1678    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1679    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1680    Inst.addOperand(MCOperand::CreateImm(Val));
1681  }
1682
1683  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1684    assert(N == 2 && "Invalid number of operands!");
1685    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1686    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1687    Inst.addOperand(MCOperand::CreateImm(Val));
1688  }
1689
1690  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1691    assert(N == 2 && "Invalid number of operands!");
1692    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1693    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1694    Inst.addOperand(MCOperand::CreateImm(Val));
1695  }
1696
1697  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1698    assert(N == 2 && "Invalid number of operands!");
1699    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1700    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1701    Inst.addOperand(MCOperand::CreateImm(Val));
1702  }
1703
1704  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1705    assert(N == 1 && "Invalid number of operands!");
1706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1707    assert(CE && "non-constant post-idx-imm8 operand!");
1708    int Imm = CE->getValue();
1709    bool isAdd = Imm >= 0;
1710    if (Imm == INT32_MIN) Imm = 0;
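    // Bit 8 of the encoded immediate is the add/sub flag; bits [7:0] hold the magnitude.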
1711    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1712    Inst.addOperand(MCOperand::CreateImm(Imm));
1713  }
1714
1715  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1716    assert(N == 1 && "Invalid number of operands!");
1717    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1718    assert(CE && "non-constant post-idx-imm8s4 operand!");
1719    int Imm = CE->getValue();
1720    bool isAdd = Imm >= 0;
1721    if (Imm == INT32_MIN) Imm = 0;
1722    // Immediate is scaled by 4.
1723    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1724    Inst.addOperand(MCOperand::CreateImm(Imm));
1725  }
1726
1727  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1728    assert(N == 2 && "Invalid number of operands!");
1729    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1730    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1731  }
1732
1733  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1734    assert(N == 2 && "Invalid number of operands!");
1735    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1736    // The sign, shift type, and shift amount are encoded in a single operand
1737    // using the AM2 encoding helpers.
1738    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1739    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1740                                     PostIdxReg.ShiftTy);
1741    Inst.addOperand(MCOperand::CreateImm(Imm));
1742  }
1743
1744  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1745    assert(N == 1 && "Invalid number of operands!");
1746    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1747  }
1748
1749  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1750    assert(N == 1 && "Invalid number of operands!");
1751    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1752  }
1753
1754  void addVecListOperands(MCInst &Inst, unsigned N) const {
1755    assert(N == 1 && "Invalid number of operands!");
1756    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1757  }
1758
1759  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1760    assert(N == 2 && "Invalid number of operands!");
1761    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1762    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1763  }
1764
1765  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1766    assert(N == 1 && "Invalid number of operands!");
1767    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1768  }
1769
1770  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1771    assert(N == 1 && "Invalid number of operands!");
1772    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1773  }
1774
1775  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1776    assert(N == 1 && "Invalid number of operands!");
1777    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1778  }
1779
1780  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1781    assert(N == 1 && "Invalid number of operands!");
1782    // The immediate encodes the type of constant as well as the value.
1783    // Mask in that this is an i8 splat.
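    // (0xe00 sets op=0, cmode=0b1110 in bits [12:8], i.e. an 8-bit value replicated
    //  into every byte of the vector.)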
1784    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1785    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1786  }
1787
1788  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1789    assert(N == 1 && "Invalid number of operands!");
1790    // The immediate encodes the type of constant as well as the value.
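    // (cmode 0b1000 (|0x800) places imm8 in the low byte of each halfword; cmode
    //  0b1010 (|0xa00) is the same value shifted left by 8.)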
1791    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1792    unsigned Value = CE->getValue();
1793    if (Value >= 256)
1794      Value = (Value >> 8) | 0xa00;
1795    else
1796      Value |= 0x800;
1797    Inst.addOperand(MCOperand::CreateImm(Value));
1798  }
1799
1800  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1801    assert(N == 1 && "Invalid number of operands!");
1802    // The immediate encodes the type of constant as well as the value.
1803    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1804    unsigned Value = CE->getValue();
1805    if (Value >= 256 && Value <= 0xff00)
1806      Value = (Value >> 8) | 0x200;
1807    else if (Value > 0xffff && Value <= 0xff0000)
1808      Value = (Value >> 16) | 0x400;
1809    else if (Value > 0xffffff)
1810      Value = (Value >> 24) | 0x600;
1811    Inst.addOperand(MCOperand::CreateImm(Value));
1812  }
1813
1814  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1815    assert(N == 1 && "Invalid number of operands!");
1816    // The immediate encodes the type of constant as well as the value.
1817    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1818    unsigned Value = CE->getValue();
1819    if (Value >= 256 && Value <= 0xffff)
1820      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1821    else if (Value > 0xffff && Value <= 0xffffff)
1822      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1823    else if (Value > 0xffffff)
1824      Value = (Value >> 24) | 0x600;
1825    Inst.addOperand(MCOperand::CreateImm(Value));
1826  }
1827
1828  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1829    assert(N == 1 && "Invalid number of operands!");
1830    // The immediate encodes the type of constant as well as the value.
1831    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1832    uint64_t Value = CE->getValue();
1833    unsigned Imm = 0;
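    // Each byte of the constant is either 0x00 or 0xff (the splat predicate enforces
    // this), so bit i of imm8 records byte i; 0x1e00 (op=1, cmode=0b1110) marks the
    // 64-bit byte-mask form.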
1834    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1835      Imm |= (Value & 1) << i;
1836    }
1837    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1838  }
1839
1840  virtual void print(raw_ostream &OS) const;
1841
1842  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1843    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1844    Op->ITMask.Mask = Mask;
1845    Op->StartLoc = S;
1846    Op->EndLoc = S;
1847    return Op;
1848  }
1849
1850  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1851    ARMOperand *Op = new ARMOperand(k_CondCode);
1852    Op->CC.Val = CC;
1853    Op->StartLoc = S;
1854    Op->EndLoc = S;
1855    return Op;
1856  }
1857
1858  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1859    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1860    Op->Cop.Val = CopVal;
1861    Op->StartLoc = S;
1862    Op->EndLoc = S;
1863    return Op;
1864  }
1865
1866  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1867    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1868    Op->Cop.Val = CopVal;
1869    Op->StartLoc = S;
1870    Op->EndLoc = S;
1871    return Op;
1872  }
1873
1874  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1875    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1876    Op->Cop.Val = Val;
1877    Op->StartLoc = S;
1878    Op->EndLoc = E;
1879    return Op;
1880  }
1881
1882  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1883    ARMOperand *Op = new ARMOperand(k_CCOut);
1884    Op->Reg.RegNum = RegNum;
1885    Op->StartLoc = S;
1886    Op->EndLoc = S;
1887    return Op;
1888  }
1889
1890  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1891    ARMOperand *Op = new ARMOperand(k_Token);
1892    Op->Tok.Data = Str.data();
1893    Op->Tok.Length = Str.size();
1894    Op->StartLoc = S;
1895    Op->EndLoc = S;
1896    return Op;
1897  }
1898
1899  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1900    ARMOperand *Op = new ARMOperand(k_Register);
1901    Op->Reg.RegNum = RegNum;
1902    Op->StartLoc = S;
1903    Op->EndLoc = E;
1904    return Op;
1905  }
1906
1907  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1908                                           unsigned SrcReg,
1909                                           unsigned ShiftReg,
1910                                           unsigned ShiftImm,
1911                                           SMLoc S, SMLoc E) {
1912    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1913    Op->RegShiftedReg.ShiftTy = ShTy;
1914    Op->RegShiftedReg.SrcReg = SrcReg;
1915    Op->RegShiftedReg.ShiftReg = ShiftReg;
1916    Op->RegShiftedReg.ShiftImm = ShiftImm;
1917    Op->StartLoc = S;
1918    Op->EndLoc = E;
1919    return Op;
1920  }
1921
1922  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1923                                            unsigned SrcReg,
1924                                            unsigned ShiftImm,
1925                                            SMLoc S, SMLoc E) {
1926    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1927    Op->RegShiftedImm.ShiftTy = ShTy;
1928    Op->RegShiftedImm.SrcReg = SrcReg;
1929    Op->RegShiftedImm.ShiftImm = ShiftImm;
1930    Op->StartLoc = S;
1931    Op->EndLoc = E;
1932    return Op;
1933  }
1934
1935  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1936                                   SMLoc S, SMLoc E) {
1937    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1938    Op->ShifterImm.isASR = isASR;
1939    Op->ShifterImm.Imm = Imm;
1940    Op->StartLoc = S;
1941    Op->EndLoc = E;
1942    return Op;
1943  }
1944
1945  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1946    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1947    Op->RotImm.Imm = Imm;
1948    Op->StartLoc = S;
1949    Op->EndLoc = E;
1950    return Op;
1951  }
1952
1953  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1954                                    SMLoc S, SMLoc E) {
1955    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1956    Op->Bitfield.LSB = LSB;
1957    Op->Bitfield.Width = Width;
1958    Op->StartLoc = S;
1959    Op->EndLoc = E;
1960    return Op;
1961  }
1962
1963  static ARMOperand *
1964  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1965                SMLoc StartLoc, SMLoc EndLoc) {
1966    KindTy Kind = k_RegisterList;
1967
1968    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1969      Kind = k_DPRRegisterList;
1970    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
1971             contains(Regs.front().first))
1972      Kind = k_SPRRegisterList;
1973
1974    ARMOperand *Op = new ARMOperand(Kind);
1975    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
1976           I = Regs.begin(), E = Regs.end(); I != E; ++I)
1977      Op->Registers.push_back(I->first);
1978    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
1979    Op->StartLoc = StartLoc;
1980    Op->EndLoc = EndLoc;
1981    return Op;
1982  }
1983
1984  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
1985                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
1986    ARMOperand *Op = new ARMOperand(k_VectorList);
1987    Op->VectorList.RegNum = RegNum;
1988    Op->VectorList.Count = Count;
1989    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
1990    Op->StartLoc = S;
1991    Op->EndLoc = E;
1992    return Op;
1993  }
1994
1995  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
1996                                              SMLoc S, SMLoc E) {
1997    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
1998    Op->VectorList.RegNum = RegNum;
1999    Op->VectorList.Count = Count;
2000    Op->StartLoc = S;
2001    Op->EndLoc = E;
2002    return Op;
2003  }
2004
2005  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2006                                             unsigned Index, SMLoc S, SMLoc E) {
2007    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2008    Op->VectorList.RegNum = RegNum;
2009    Op->VectorList.Count = Count;
2010    Op->VectorList.LaneIndex = Index;
2011    Op->StartLoc = S;
2012    Op->EndLoc = E;
2013    return Op;
2014  }
2015
2016  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2017                                       MCContext &Ctx) {
2018    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2019    Op->VectorIndex.Val = Idx;
2020    Op->StartLoc = S;
2021    Op->EndLoc = E;
2022    return Op;
2023  }
2024
2025  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2026    ARMOperand *Op = new ARMOperand(k_Immediate);
2027    Op->Imm.Val = Val;
2028    Op->StartLoc = S;
2029    Op->EndLoc = E;
2030    return Op;
2031  }
2032
2033  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2034    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2035    Op->FPImm.Val = Val;
2036    Op->StartLoc = S;
2037    Op->EndLoc = S;
2038    return Op;
2039  }
2040
2041  static ARMOperand *CreateMem(unsigned BaseRegNum,
2042                               const MCConstantExpr *OffsetImm,
2043                               unsigned OffsetRegNum,
2044                               ARM_AM::ShiftOpc ShiftType,
2045                               unsigned ShiftImm,
2046                               unsigned Alignment,
2047                               bool isNegative,
2048                               SMLoc S, SMLoc E) {
2049    ARMOperand *Op = new ARMOperand(k_Memory);
2050    Op->Memory.BaseRegNum = BaseRegNum;
2051    Op->Memory.OffsetImm = OffsetImm;
2052    Op->Memory.OffsetRegNum = OffsetRegNum;
2053    Op->Memory.ShiftType = ShiftType;
2054    Op->Memory.ShiftImm = ShiftImm;
2055    Op->Memory.Alignment = Alignment;
2056    Op->Memory.isNegative = isNegative;
2057    Op->StartLoc = S;
2058    Op->EndLoc = E;
2059    return Op;
2060  }
2061
2062  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2063                                      ARM_AM::ShiftOpc ShiftTy,
2064                                      unsigned ShiftImm,
2065                                      SMLoc S, SMLoc E) {
2066    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2067    Op->PostIdxReg.RegNum = RegNum;
2068    Op->PostIdxReg.isAdd = isAdd;
2069    Op->PostIdxReg.ShiftTy = ShiftTy;
2070    Op->PostIdxReg.ShiftImm = ShiftImm;
2071    Op->StartLoc = S;
2072    Op->EndLoc = E;
2073    return Op;
2074  }
2075
2076  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2077    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2078    Op->MBOpt.Val = Opt;
2079    Op->StartLoc = S;
2080    Op->EndLoc = S;
2081    return Op;
2082  }
2083
2084  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2085    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2086    Op->IFlags.Val = IFlags;
2087    Op->StartLoc = S;
2088    Op->EndLoc = S;
2089    return Op;
2090  }
2091
2092  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2093    ARMOperand *Op = new ARMOperand(k_MSRMask);
2094    Op->MMask.Val = MMask;
2095    Op->StartLoc = S;
2096    Op->EndLoc = S;
2097    return Op;
2098  }
2099};
2100
2101} // end anonymous namespace.
2102
2103void ARMOperand::print(raw_ostream &OS) const {
2104  switch (Kind) {
2105  case k_FPImmediate:
2106    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2107       << ") >";
2108    break;
2109  case k_CondCode:
2110    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2111    break;
2112  case k_CCOut:
2113    OS << "<ccout " << getReg() << ">";
2114    break;
2115  case k_ITCondMask: {
2116    static const char *MaskStr[] = {
2117      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2118      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2119    };
2120    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2121    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2122    break;
2123  }
2124  case k_CoprocNum:
2125    OS << "<coprocessor number: " << getCoproc() << ">";
2126    break;
2127  case k_CoprocReg:
2128    OS << "<coprocessor register: " << getCoproc() << ">";
2129    break;
2130  case k_CoprocOption:
2131    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2132    break;
2133  case k_MSRMask:
2134    OS << "<mask: " << getMSRMask() << ">";
2135    break;
2136  case k_Immediate:
2137    getImm()->print(OS);
2138    break;
2139  case k_MemBarrierOpt:
2140    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2141    break;
2142  case k_Memory:
2143    OS << "<memory "
2144       << " base:" << Memory.BaseRegNum;
2145    OS << ">";
2146    break;
2147  case k_PostIndexRegister:
2148    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2149       << PostIdxReg.RegNum;
2150    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2151      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2152         << PostIdxReg.ShiftImm;
2153    OS << ">";
2154    break;
2155  case k_ProcIFlags: {
2156    OS << "<ARM_PROC::";
2157    unsigned IFlags = getProcIFlags();
2158    for (int i=2; i >= 0; --i)
2159      if (IFlags & (1 << i))
2160        OS << ARM_PROC::IFlagsToString(1 << i);
2161    OS << ">";
2162    break;
2163  }
2164  case k_Register:
2165    OS << "<register " << getReg() << ">";
2166    break;
2167  case k_ShifterImmediate:
2168    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2169       << " #" << ShifterImm.Imm << ">";
2170    break;
2171  case k_ShiftedRegister:
2172    OS << "<so_reg_reg "
2173       << RegShiftedReg.SrcReg << " "
2174       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2175       << " " << RegShiftedReg.ShiftReg << ">";
2176    break;
2177  case k_ShiftedImmediate:
2178    OS << "<so_reg_imm "
2179       << RegShiftedImm.SrcReg << " "
2180       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2181       << " #" << RegShiftedImm.ShiftImm << ">";
2182    break;
2183  case k_RotateImmediate:
2184    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2185    break;
2186  case k_BitfieldDescriptor:
2187    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2188       << ", width: " << Bitfield.Width << ">";
2189    break;
2190  case k_RegisterList:
2191  case k_DPRRegisterList:
2192  case k_SPRRegisterList: {
2193    OS << "<register_list ";
2194
2195    const SmallVectorImpl<unsigned> &RegList = getRegList();
2196    for (SmallVectorImpl<unsigned>::const_iterator
2197           I = RegList.begin(), E = RegList.end(); I != E; ) {
2198      OS << *I;
2199      if (++I < E) OS << ", ";
2200    }
2201
2202    OS << ">";
2203    break;
2204  }
2205  case k_VectorList:
2206    OS << "<vector_list " << VectorList.Count << " * "
2207       << VectorList.RegNum << ">";
2208    break;
2209  case k_VectorListAllLanes:
2210    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2211       << VectorList.RegNum << ">";
2212    break;
2213  case k_VectorListIndexed:
2214    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2215       << VectorList.Count << " * " << VectorList.RegNum << ">";
2216    break;
2217  case k_Token:
2218    OS << "'" << getToken() << "'";
2219    break;
2220  case k_VectorIndex:
2221    OS << "<vectorindex " << getVectorIndex() << ">";
2222    break;
2223  }
2224}
2225
2226/// @name Auto-generated Match Functions
2227/// {
2228
2229static unsigned MatchRegisterName(StringRef Name);
2230
2231/// }
2232
2233bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2234                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2235  StartLoc = Parser.getTok().getLoc();
2236  RegNo = tryParseRegister();
2237  EndLoc = Parser.getTok().getLoc();
2238
2239  return (RegNo == (unsigned)-1);
2240}
2241
2242/// Try to parse a register name.  The token must be an Identifier when called,
2243/// and if it is a register name the token is eaten and the register number is
2244/// returned.  Otherwise return -1.
2245///
2246int ARMAsmParser::tryParseRegister() {
2247  const AsmToken &Tok = Parser.getTok();
2248  if (Tok.isNot(AsmToken::Identifier)) return -1;
2249
2250  std::string lowerCase = Tok.getString().lower();
2251  unsigned RegNum = MatchRegisterName(lowerCase);
2252  if (!RegNum) {
2253    RegNum = StringSwitch<unsigned>(lowerCase)
2254      .Case("r13", ARM::SP)
2255      .Case("r14", ARM::LR)
2256      .Case("r15", ARM::PC)
2257      .Case("ip", ARM::R12)
2258      // Additional register name aliases for 'gas' compatibility.
2259      .Case("a1", ARM::R0)
2260      .Case("a2", ARM::R1)
2261      .Case("a3", ARM::R2)
2262      .Case("a4", ARM::R3)
2263      .Case("v1", ARM::R4)
2264      .Case("v2", ARM::R5)
2265      .Case("v3", ARM::R6)
2266      .Case("v4", ARM::R7)
2267      .Case("v5", ARM::R8)
2268      .Case("v6", ARM::R9)
2269      .Case("v7", ARM::R10)
2270      .Case("v8", ARM::R11)
2271      .Case("sb", ARM::R9)
2272      .Case("sl", ARM::R10)
2273      .Case("fp", ARM::R11)
2274      .Default(0);
2275  }
2276  if (!RegNum) {
2277    // Check for aliases registered via .req.
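    // (For example, after 'acc .req r4', the identifier 'acc' resolves to r4 here.)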
2278    StringMap<unsigned>::const_iterator Entry =
2279      RegisterReqs.find(Tok.getIdentifier());
2280    // If no match, return failure.
2281    if (Entry == RegisterReqs.end())
2282      return -1;
2283    Parser.Lex(); // Eat identifier token.
2284    return Entry->getValue();
2285  }
2286
2287  Parser.Lex(); // Eat identifier token.
2288
2289  return RegNum;
2290}
2291
2292// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2293// If a recoverable error occurs, return 1. If an irrecoverable error
2294// occurs, return -1. An irrecoverable error is one where tokens have been
2295// consumed in the process of trying to parse the shifter (i.e., when it is
2296// indeed a shifter operand, but malformed).
2297int ARMAsmParser::tryParseShiftRegister(
2298                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2299  SMLoc S = Parser.getTok().getLoc();
2300  const AsmToken &Tok = Parser.getTok();
2301  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2302
2303  std::string lowerCase = Tok.getString().lower();
2304  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2305      .Case("asl", ARM_AM::lsl)
2306      .Case("lsl", ARM_AM::lsl)
2307      .Case("lsr", ARM_AM::lsr)
2308      .Case("asr", ARM_AM::asr)
2309      .Case("ror", ARM_AM::ror)
2310      .Case("rrx", ARM_AM::rrx)
2311      .Default(ARM_AM::no_shift);
2312
2313  if (ShiftTy == ARM_AM::no_shift)
2314    return 1;
2315
2316  Parser.Lex(); // Eat the operator.
2317
2318  // The source register for the shift has already been added to the
2319  // operand list, so we need to pop it off and combine it into the shifted
2320  // register operand instead.
2321  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2322  if (!PrevOp->isReg())
2323    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2324  int SrcReg = PrevOp->getReg();
2325  int64_t Imm = 0;
2326  int ShiftReg = 0;
2327  if (ShiftTy == ARM_AM::rrx) {
2328    // RRX doesn't have an explicit shift amount. The encoder expects
2329    // the shift register to be the same as the source register. Seems odd,
2330    // but OK.
2331    ShiftReg = SrcReg;
2332  } else {
2333    // Figure out if this is shifted by a constant or a register (for non-RRX).
2334    if (Parser.getTok().is(AsmToken::Hash) ||
2335        Parser.getTok().is(AsmToken::Dollar)) {
2336      Parser.Lex(); // Eat hash.
2337      SMLoc ImmLoc = Parser.getTok().getLoc();
2338      const MCExpr *ShiftExpr = 0;
2339      if (getParser().ParseExpression(ShiftExpr)) {
2340        Error(ImmLoc, "invalid immediate shift value");
2341        return -1;
2342      }
2343      // The expression must be evaluatable as an immediate.
2344      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2345      if (!CE) {
2346        Error(ImmLoc, "invalid immediate shift value");
2347        return -1;
2348      }
2349      // Range check the immediate.
2350      // lsl, ror: 0 <= imm <= 31
2351      // lsr, asr: 0 <= imm <= 32
2352      Imm = CE->getValue();
2353      if (Imm < 0 ||
2354          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2355          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2356        Error(ImmLoc, "immediate shift value out of range");
2357        return -1;
2358      }
2359    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2360      ShiftReg = tryParseRegister();
2361      SMLoc L = Parser.getTok().getLoc();
2362      if (ShiftReg == -1) {
2363        Error(L, "expected immediate or register in shift operand");
2364        return -1;
2365      }
2366    } else {
2367      Error(Parser.getTok().getLoc(),
2368            "expected immediate or register in shift operand");
2369      return -1;
2370    }
2371  }
2372
2373  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2374    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2375                                                         ShiftReg, Imm,
2376                                               S, Parser.getTok().getLoc()));
2377  else
2378    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2379                                               S, Parser.getTok().getLoc()));
2380
2381  return 0;
2382}
2383
2384
2385/// Try to parse a register name.  The token must be an Identifier when called.
2386/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2387/// if there is a "writeback". Returns 'true' if it's not a register.
2388///
2389/// TODO: this is likely to change to allow different register types and/or to
2390/// parse for a specific register type.
2391bool ARMAsmParser::
2392tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2393  SMLoc S = Parser.getTok().getLoc();
2394  int RegNo = tryParseRegister();
2395  if (RegNo == -1)
2396    return true;
2397
2398  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2399
2400  const AsmToken &ExclaimTok = Parser.getTok();
2401  if (ExclaimTok.is(AsmToken::Exclaim)) {
2402    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2403                                               ExclaimTok.getLoc()));
2404    Parser.Lex(); // Eat exclaim token
2405    return false;
2406  }
2407
2408  // Also check for an index operand. This is only legal for vector registers,
2409  // but that'll get caught OK in operand matching, so we don't need to
2410  // explicitly filter everything else out here.
2411  if (Parser.getTok().is(AsmToken::LBrac)) {
2412    SMLoc SIdx = Parser.getTok().getLoc();
2413    Parser.Lex(); // Eat left bracket token.
2414
2415    const MCExpr *ImmVal;
2416    if (getParser().ParseExpression(ImmVal))
2417      return true;
2418    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2419    if (!MCE) {
2420      TokError("immediate value expected for vector index");
2421      return true;
2422    }
2423
2424    SMLoc E = Parser.getTok().getLoc();
2425    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2426      Error(E, "']' expected");
2427      return true;
2428    }
2429
2430    Parser.Lex(); // Eat right bracket token.
2431
2432    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2433                                                     SIdx, E,
2434                                                     getContext()));
2435  }
2436
2437  return false;
2438}
2439
2440/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2441/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2442/// "c5", ...
2443static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2444  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2445  // but efficient.
2446  switch (Name.size()) {
2447  default: break;
2448  case 2:
2449    if (Name[0] != CoprocOp)
2450      return -1;
2451    switch (Name[1]) {
2452    default:  return -1;
2453    case '0': return 0;
2454    case '1': return 1;
2455    case '2': return 2;
2456    case '3': return 3;
2457    case '4': return 4;
2458    case '5': return 5;
2459    case '6': return 6;
2460    case '7': return 7;
2461    case '8': return 8;
2462    case '9': return 9;
2463    }
2464    break;
2465  case 3:
2466    if (Name[0] != CoprocOp || Name[1] != '1')
2467      return -1;
2468    switch (Name[2]) {
2469    default:  return -1;
2470    case '0': return 10;
2471    case '1': return 11;
2472    case '2': return 12;
2473    case '3': return 13;
2474    case '4': return 14;
2475    case '5': return 15;
2476    }
2477    break;
2478  }
2479
2480  return -1;
2481}
2482
2483/// parseITCondCode - Try to parse a condition code for an IT instruction.
2484ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2485parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2486  SMLoc S = Parser.getTok().getLoc();
2487  const AsmToken &Tok = Parser.getTok();
2488  if (!Tok.is(AsmToken::Identifier))
2489    return MatchOperand_NoMatch;
2490  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2491    .Case("eq", ARMCC::EQ)
2492    .Case("ne", ARMCC::NE)
2493    .Case("hs", ARMCC::HS)
2494    .Case("cs", ARMCC::HS)
2495    .Case("lo", ARMCC::LO)
2496    .Case("cc", ARMCC::LO)
2497    .Case("mi", ARMCC::MI)
2498    .Case("pl", ARMCC::PL)
2499    .Case("vs", ARMCC::VS)
2500    .Case("vc", ARMCC::VC)
2501    .Case("hi", ARMCC::HI)
2502    .Case("ls", ARMCC::LS)
2503    .Case("ge", ARMCC::GE)
2504    .Case("lt", ARMCC::LT)
2505    .Case("gt", ARMCC::GT)
2506    .Case("le", ARMCC::LE)
2507    .Case("al", ARMCC::AL)
2508    .Default(~0U);
2509  if (CC == ~0U)
2510    return MatchOperand_NoMatch;
2511  Parser.Lex(); // Eat the token.
2512
2513  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2514
2515  return MatchOperand_Success;
2516}
2517
2518/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2519/// token must be an Identifier when called, and if it is a coprocessor
2520/// number, the token is eaten and the operand is added to the operand list.
2521ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2522parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2523  SMLoc S = Parser.getTok().getLoc();
2524  const AsmToken &Tok = Parser.getTok();
2525  if (Tok.isNot(AsmToken::Identifier))
2526    return MatchOperand_NoMatch;
2527
2528  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2529  if (Num == -1)
2530    return MatchOperand_NoMatch;
2531
2532  Parser.Lex(); // Eat identifier token.
2533  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2534  return MatchOperand_Success;
2535}
2536
2537/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2538/// token must be an Identifier when called, and if it is a coprocessor
2539/// register, the token is eaten and the operand is added to the operand list.
2540ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2541parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2542  SMLoc S = Parser.getTok().getLoc();
2543  const AsmToken &Tok = Parser.getTok();
2544  if (Tok.isNot(AsmToken::Identifier))
2545    return MatchOperand_NoMatch;
2546
2547  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2548  if (Reg == -1)
2549    return MatchOperand_NoMatch;
2550
2551  Parser.Lex(); // Eat identifier token.
2552  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2553  return MatchOperand_Success;
2554}
2555
2556/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2557/// coproc_option : '{' imm0_255 '}'
2558ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2559parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2560  SMLoc S = Parser.getTok().getLoc();
2561
2562  // If this isn't a '{', this isn't a coprocessor immediate operand.
2563  if (Parser.getTok().isNot(AsmToken::LCurly))
2564    return MatchOperand_NoMatch;
2565  Parser.Lex(); // Eat the '{'
2566
2567  const MCExpr *Expr;
2568  SMLoc Loc = Parser.getTok().getLoc();
2569  if (getParser().ParseExpression(Expr)) {
2570    Error(Loc, "illegal expression");
2571    return MatchOperand_ParseFail;
2572  }
2573  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2574  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2575    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2576    return MatchOperand_ParseFail;
2577  }
2578  int Val = CE->getValue();
2579
2580  // Check for and consume the closing '}'
2581  if (Parser.getTok().isNot(AsmToken::RCurly))
2582    return MatchOperand_ParseFail;
2583  SMLoc E = Parser.getTok().getLoc();
2584  Parser.Lex(); // Eat the '}'
2585
2586  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2587  return MatchOperand_Success;
2588}
2589
2590// For register list parsing, we need to map from raw GPR register numbering
2591// to the enumeration values. The enumeration values aren't sorted by
2592// register number due to our using "sp", "lr" and "pc" as canonical names.
2593static unsigned getNextRegister(unsigned Reg) {
2594  // If this is a GPR, we need to do it manually, otherwise we can rely
2595  // on the sort ordering of the enumeration since the other reg-classes
2596  // are sane.
2597  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2598    return Reg + 1;
2599  switch(Reg) {
2600  default: assert(0 && "Invalid GPR number!");
2601  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2602  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2603  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2604  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2605  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2606  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2607  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2608  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2609  }
2610}
2611
2612// Return the low-subreg of a given Q register.
2613static unsigned getDRegFromQReg(unsigned QReg) {
2614  switch (QReg) {
2615  default: llvm_unreachable("expected a Q register!");
2616  case ARM::Q0:  return ARM::D0;
2617  case ARM::Q1:  return ARM::D2;
2618  case ARM::Q2:  return ARM::D4;
2619  case ARM::Q3:  return ARM::D6;
2620  case ARM::Q4:  return ARM::D8;
2621  case ARM::Q5:  return ARM::D10;
2622  case ARM::Q6:  return ARM::D12;
2623  case ARM::Q7:  return ARM::D14;
2624  case ARM::Q8:  return ARM::D16;
2625  case ARM::Q9:  return ARM::D18;
2626  case ARM::Q10: return ARM::D20;
2627  case ARM::Q11: return ARM::D22;
2628  case ARM::Q12: return ARM::D24;
2629  case ARM::Q13: return ARM::D26;
2630  case ARM::Q14: return ARM::D28;
2631  case ARM::Q15: return ARM::D30;
2632  }
2633}
2634
2635/// Parse a register list.
2636bool ARMAsmParser::
2637parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2638  assert(Parser.getTok().is(AsmToken::LCurly) &&
2639         "Token is not a Left Curly Brace");
2640  SMLoc S = Parser.getTok().getLoc();
2641  Parser.Lex(); // Eat '{' token.
2642  SMLoc RegLoc = Parser.getTok().getLoc();
2643
2644  // Check the first register in the list to see what register class
2645  // this is a list of.
2646  int Reg = tryParseRegister();
2647  if (Reg == -1)
2648    return Error(RegLoc, "register expected");
2649
2650  // The reglist instructions have at most 16 registers, so reserve
2651  // space for that many.
2652  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2653
2654  // Allow Q regs and just interpret them as the two D sub-registers.
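  // (For example, 'vpush {q0, q1}' is handled as the D-register list {d0, d1, d2, d3}.)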
2655  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2656    Reg = getDRegFromQReg(Reg);
2657    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2658    ++Reg;
2659  }
2660  const MCRegisterClass *RC;
2661  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2662    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2663  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2664    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2665  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2666    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2667  else
2668    return Error(RegLoc, "invalid register in register list");
2669
2670  // Store the register.
2671  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2672
2673  // This starts immediately after the first register token in the list,
2674  // so we can see either a comma or a minus (range separator) as a legal
2675  // next token.
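  // (e.g. '{r0-r3, r12, lr}' mixes ranges and individual registers.)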
2676  while (Parser.getTok().is(AsmToken::Comma) ||
2677         Parser.getTok().is(AsmToken::Minus)) {
2678    if (Parser.getTok().is(AsmToken::Minus)) {
2679      Parser.Lex(); // Eat the minus.
2680      SMLoc EndLoc = Parser.getTok().getLoc();
2681      int EndReg = tryParseRegister();
2682      if (EndReg == -1)
2683        return Error(EndLoc, "register expected");
2684      // Allow Q regs and just interpret them as the two D sub-registers.
2685      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2686        EndReg = getDRegFromQReg(EndReg) + 1;
2687      // If the register is the same as the start reg, there's nothing
2688      // more to do.
2689      if (Reg == EndReg)
2690        continue;
2691      // The register must be in the same register class as the first.
2692      if (!RC->contains(EndReg))
2693        return Error(EndLoc, "invalid register in register list");
2694      // Ranges must go from low to high.
2695      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2696        return Error(EndLoc, "bad range in register list");
2697
2698      // Add all the registers in the range to the register list.
2699      while (Reg != EndReg) {
2700        Reg = getNextRegister(Reg);
2701        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2702      }
2703      continue;
2704    }
2705    Parser.Lex(); // Eat the comma.
2706    RegLoc = Parser.getTok().getLoc();
2707    int OldReg = Reg;
2708    const AsmToken RegTok = Parser.getTok();
2709    Reg = tryParseRegister();
2710    if (Reg == -1)
2711      return Error(RegLoc, "register expected");
2712    // Allow Q regs and just interpret them as the two D sub-registers.
2713    bool isQReg = false;
2714    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2715      Reg = getDRegFromQReg(Reg);
2716      isQReg = true;
2717    }
2718    // The register must be in the same register class as the first.
2719    if (!RC->contains(Reg))
2720      return Error(RegLoc, "invalid register in register list");
2721    // List must be monotonically increasing.
2722    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2723      return Error(RegLoc, "register list not in ascending order");
2724    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2725      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2726              ") in register list");
2727      continue;
2728    }
2729    // VFP register lists must also be contiguous.
2730    // It's OK to use the enumeration values directly here, as the
2731    // VFP register classes have the enum sorted properly.
2732    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2733        Reg != OldReg + 1)
2734      return Error(RegLoc, "non-contiguous register range");
2735    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2736    if (isQReg)
2737      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2738  }
2739
2740  SMLoc E = Parser.getTok().getLoc();
2741  if (Parser.getTok().isNot(AsmToken::RCurly))
2742    return Error(E, "'}' expected");
2743  Parser.Lex(); // Eat '}' token.
2744
2745  // Push the register list operand.
2746  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2747
2748  // The ARM system instruction variants for LDM/STM have a '^' token here.
2749  if (Parser.getTok().is(AsmToken::Caret)) {
2750    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2751    Parser.Lex(); // Eat '^' token.
2752  }
2753
2754  return false;
2755}
2756
2757// Helper function to parse the lane index for vector lists.
2758ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2759parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2760  Index = 0; // Always return a defined index value.
2761  if (Parser.getTok().is(AsmToken::LBrac)) {
2762    Parser.Lex(); // Eat the '['.
2763    if (Parser.getTok().is(AsmToken::RBrac)) {
2764      // "Dn[]" is the 'all lanes' syntax.
2765      LaneKind = AllLanes;
2766      Parser.Lex(); // Eat the ']'.
2767      return MatchOperand_Success;
2768    }
2769    if (Parser.getTok().is(AsmToken::Integer)) {
2770      int64_t Val = Parser.getTok().getIntVal();
2771      // Make this range check context sensitive for .8, .16, .32.
2772      if (Val < 0 || Val > 7)
2773        Error(Parser.getTok().getLoc(), "lane index out of range");
2774      Index = Val;
2775      LaneKind = IndexedLane;
2776      Parser.Lex(); // Eat the token;
2777      if (Parser.getTok().isNot(AsmToken::RBrac))
2778        Error(Parser.getTok().getLoc(), "']' expected");
2779      Parser.Lex(); // Eat the ']'.
2780      return MatchOperand_Success;
2781    }
2782    Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2783    return MatchOperand_ParseFail;
2784  }
2785  LaneKind = NoLanes;
2786  return MatchOperand_Success;
2787}
2788
2789// Parse a vector register list.
2790ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2791parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2792  VectorLaneTy LaneKind;
2793  unsigned LaneIndex;
2794  SMLoc S = Parser.getTok().getLoc();
2795  // As an extension (to match gas), support a plain D register or Q register
2796  // (without enclosing curly braces) as a single- or double-entry list,
2797  // respectively.
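  // (So 'vst1.8 d0, [r0]' is accepted as shorthand for 'vst1.8 {d0}, [r0]'.)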
2798  if (Parser.getTok().is(AsmToken::Identifier)) {
2799    int Reg = tryParseRegister();
2800    if (Reg == -1)
2801      return MatchOperand_NoMatch;
2802    SMLoc E = Parser.getTok().getLoc();
2803    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2804      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2805      if (Res != MatchOperand_Success)
2806        return Res;
2807      switch (LaneKind) {
2808      default:
2809        assert(0 && "unexpected lane kind!");
2810      case NoLanes:
2811        E = Parser.getTok().getLoc();
2812        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2813        break;
2814      case AllLanes:
2815        E = Parser.getTok().getLoc();
2816        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
2817        break;
2818      case IndexedLane:
2819        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2820                                                               LaneIndex, S,E));
2821        break;
2822      }
2823      return MatchOperand_Success;
2824    }
2825    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2826      Reg = getDRegFromQReg(Reg);
2827      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2828      if (Res != MatchOperand_Success)
2829        return Res;
2830      switch (LaneKind) {
2831      default:
2832        assert(0 && "unexpected lane kind!");
2833      case NoLanes:
2834        E = Parser.getTok().getLoc();
2835        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2836        break;
2837      case AllLanes:
2838        E = Parser.getTok().getLoc();
2839        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
2840        break;
2841      case IndexedLane:
2842        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2843                                                               LaneIndex, S,E));
2844        break;
2845      }
2846      return MatchOperand_Success;
2847    }
2848    Error(S, "vector register expected");
2849    return MatchOperand_ParseFail;
2850  }
2851
2852  if (Parser.getTok().isNot(AsmToken::LCurly))
2853    return MatchOperand_NoMatch;
2854
2855  Parser.Lex(); // Eat '{' token.
2856  SMLoc RegLoc = Parser.getTok().getLoc();
2857
2858  int Reg = tryParseRegister();
2859  if (Reg == -1) {
2860    Error(RegLoc, "register expected");
2861    return MatchOperand_ParseFail;
2862  }
2863  unsigned Count = 1;
2864  int Spacing = 0;
2865  unsigned FirstReg = Reg;
2866  // The list is of D registers, but we also allow Q regs and just interpret
2867  // them as the two D sub-registers.
2868  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2869    FirstReg = Reg = getDRegFromQReg(Reg);
2870    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2871                 // it's ambiguous with four-register single spaced.
2872    ++Reg;
2873    ++Count;
2874  }
2875  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2876    return MatchOperand_ParseFail;
2877
2878  while (Parser.getTok().is(AsmToken::Comma) ||
2879         Parser.getTok().is(AsmToken::Minus)) {
2880    if (Parser.getTok().is(AsmToken::Minus)) {
2881      if (!Spacing)
2882        Spacing = 1; // Register range implies a single spaced list.
2883      else if (Spacing == 2) {
2884        Error(Parser.getTok().getLoc(),
2885              "sequential registers in double spaced list");
2886        return MatchOperand_ParseFail;
2887      }
2888      Parser.Lex(); // Eat the minus.
2889      SMLoc EndLoc = Parser.getTok().getLoc();
2890      int EndReg = tryParseRegister();
2891      if (EndReg == -1) {
2892        Error(EndLoc, "register expected");
2893        return MatchOperand_ParseFail;
2894      }
2895      // Allow Q regs and just interpret them as the two D sub-registers.
2896      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2897        EndReg = getDRegFromQReg(EndReg) + 1;
2898      // If the register is the same as the start reg, there's nothing
2899      // more to do.
2900      if (Reg == EndReg)
2901        continue;
2902      // The register must be in the same register class as the first.
2903      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2904        Error(EndLoc, "invalid register in register list");
2905        return MatchOperand_ParseFail;
2906      }
2907      // Ranges must go from low to high.
2908      if (Reg > EndReg) {
2909        Error(EndLoc, "bad range in register list");
2910        return MatchOperand_ParseFail;
2911      }
2912      // Parse the lane specifier if present.
2913      VectorLaneTy NextLaneKind;
2914      unsigned NextLaneIndex;
2915      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2916        return MatchOperand_ParseFail;
2917      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2918        Error(EndLoc, "mismatched lane index in register list");
2919        return MatchOperand_ParseFail;
2920      }
2921      EndLoc = Parser.getTok().getLoc();
2922
2923      // Add all the registers in the range to the register list.
2924      Count += EndReg - Reg;
2925      Reg = EndReg;
2926      continue;
2927    }
2928    Parser.Lex(); // Eat the comma.
2929    RegLoc = Parser.getTok().getLoc();
2930    int OldReg = Reg;
2931    Reg = tryParseRegister();
2932    if (Reg == -1) {
2933      Error(RegLoc, "register expected");
2934      return MatchOperand_ParseFail;
2935    }
2936    // Vector register lists must be contiguous.
2937    // It's OK to use the enumeration values directly here, as the VFP
2938    // register classes have the enum sorted properly.
2939    //
2940    // The list is of D registers, but we also allow Q regs and just interpret
2941    // them as the two D sub-registers.
2942    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2943      if (!Spacing)
2944        Spacing = 1; // Register range implies a single spaced list.
2945      else if (Spacing == 2) {
2946        Error(RegLoc,
2947              "invalid register in double-spaced list (must be 'D' register)");
2948        return MatchOperand_ParseFail;
2949      }
2950      Reg = getDRegFromQReg(Reg);
2951      if (Reg != OldReg + 1) {
2952        Error(RegLoc, "non-contiguous register range");
2953        return MatchOperand_ParseFail;
2954      }
2955      ++Reg;
2956      Count += 2;
2957      // Parse the lane specifier if present.
2958      VectorLaneTy NextLaneKind;
2959      unsigned NextLaneIndex;
2960      SMLoc EndLoc = Parser.getTok().getLoc();
2961      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2962        return MatchOperand_ParseFail;
2963      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2964        Error(EndLoc, "mismatched lane index in register list");
2965        return MatchOperand_ParseFail;
2966      }
2967      continue;
2968    }
2969    // Normal D register.
2970    // Figure out the register spacing (single or double) of the list if
2971    // we don't know it already.
2972    if (!Spacing)
2973      Spacing = 1 + (Reg == OldReg + 2);
2974
2975    // Just check that it's contiguous and keep going.
2976    if (Reg != OldReg + Spacing) {
2977      Error(RegLoc, "non-contiguous register range");
2978      return MatchOperand_ParseFail;
2979    }
2980    ++Count;
2981    // Parse the lane specifier if present.
2982    VectorLaneTy NextLaneKind;
2983    unsigned NextLaneIndex;
2984    SMLoc EndLoc = Parser.getTok().getLoc();
2985    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2986      return MatchOperand_ParseFail;
2987    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2988      Error(EndLoc, "mismatched lane index in register list");
2989      return MatchOperand_ParseFail;
2990    }
2991    if (Spacing == 2 && LaneKind != NoLanes) {
2992      Error(EndLoc,
2993            "lane index specifier invalid in double spaced register list");
2994      return MatchOperand_ParseFail;
2995    }
2996  }
2997
2998  SMLoc E = Parser.getTok().getLoc();
2999  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3000    Error(E, "'}' expected");
3001    return MatchOperand_ParseFail;
3002  }
3003  Parser.Lex(); // Eat '}' token.
3004
3005  switch (LaneKind) {
3006  default:
3007    assert(0 && "unexpected lane kind in register list.");
3008  case NoLanes:
3009    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3010                                                    (Spacing == 2), S, E));
3011    break;
3012  case AllLanes:
3013    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3014                                                            S, E));
3015    break;
3016  case IndexedLane:
3017    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3018                                                           LaneIndex, S, E));
3019    break;
3020  }
3021  return MatchOperand_Success;
3022}
3023
3024/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
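/// e.g. the 'ish' in 'dmb ish' or the 'sy' in 'dsb sy'.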
3025ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3026parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3027  SMLoc S = Parser.getTok().getLoc();
3028  const AsmToken &Tok = Parser.getTok();
3029  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3030  StringRef OptStr = Tok.getString();
3031
3032  unsigned Opt = StringSwitch<unsigned>(OptStr)
3033    .Case("sy",    ARM_MB::SY)
3034    .Case("st",    ARM_MB::ST)
3035    .Case("sh",    ARM_MB::ISH)
3036    .Case("ish",   ARM_MB::ISH)
3037    .Case("shst",  ARM_MB::ISHST)
3038    .Case("ishst", ARM_MB::ISHST)
3039    .Case("nsh",   ARM_MB::NSH)
3040    .Case("un",    ARM_MB::NSH)
3041    .Case("nshst", ARM_MB::NSHST)
3042    .Case("unst",  ARM_MB::NSHST)
3043    .Case("osh",   ARM_MB::OSH)
3044    .Case("oshst", ARM_MB::OSHST)
3045    .Default(~0U);
3046
3047  if (Opt == ~0U)
3048    return MatchOperand_NoMatch;
3049
3050  Parser.Lex(); // Eat identifier token.
3051  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3052  return MatchOperand_Success;
3053}
3054
3055/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
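/// e.g. the 'if' in 'cpsid if' (any combination of 'a', 'i' and 'f').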
3056ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3057parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3058  SMLoc S = Parser.getTok().getLoc();
3059  const AsmToken &Tok = Parser.getTok();
3060  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3061  StringRef IFlagsStr = Tok.getString();
3062
3063  // An iflags string of "none" is interpreted to mean that none of the AIF
3064  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3065  unsigned IFlags = 0;
3066  if (IFlagsStr != "none") {
3067    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3068      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3069        .Case("a", ARM_PROC::A)
3070        .Case("i", ARM_PROC::I)
3071        .Case("f", ARM_PROC::F)
3072        .Default(~0U);
3073
3074      // If some specific iflag is already set, it means that some letter is
3075      // present more than once, which is not acceptable.
3076      if (Flag == ~0U || (IFlags & Flag))
3077        return MatchOperand_NoMatch;
3078
3079      IFlags |= Flag;
3080    }
3081  }
3082
3083  Parser.Lex(); // Eat identifier token.
3084  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3085  return MatchOperand_Success;
3086}
3087
3088/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
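/// e.g. the 'cpsr_fc' in 'msr cpsr_fc, r0', or 'primask' on M-class cores.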
3089ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3090parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3091  SMLoc S = Parser.getTok().getLoc();
3092  const AsmToken &Tok = Parser.getTok();
3093  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3094  StringRef Mask = Tok.getString();
3095
3096  if (isMClass()) {
3097    // See ARMv6-M 10.1.1
3098    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3099      .Case("apsr", 0)
3100      .Case("iapsr", 1)
3101      .Case("eapsr", 2)
3102      .Case("xpsr", 3)
3103      .Case("ipsr", 5)
3104      .Case("epsr", 6)
3105      .Case("iepsr", 7)
3106      .Case("msp", 8)
3107      .Case("psp", 9)
3108      .Case("primask", 16)
3109      .Case("basepri", 17)
3110      .Case("basepri_max", 18)
3111      .Case("faultmask", 19)
3112      .Case("control", 20)
3113      .Default(~0U);
3114
3115    if (FlagsVal == ~0U)
3116      return MatchOperand_NoMatch;
3117
3118    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3119      // basepri, basepri_max and faultmask only valid for V7m.
3120      return MatchOperand_NoMatch;
3121
3122    Parser.Lex(); // Eat identifier token.
3123    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3124    return MatchOperand_Success;
3125  }
3126
3127  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3128  size_t Start = 0, Next = Mask.find('_');
3129  StringRef Flags = "";
3130  std::string SpecReg = Mask.slice(Start, Next).lower();
3131  if (Next != StringRef::npos)
3132    Flags = Mask.slice(Next+1, Mask.size());
3133
3134  // FlagsVal contains the complete mask:
3135  // 3-0: Mask
3136  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
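  // e.g. 'spsr_fc' yields 0b11001: the 'c' (1) and 'f' (8) mask bits plus the
  // SPSR bit (16).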
3137  unsigned FlagsVal = 0;
3138
3139  if (SpecReg == "apsr") {
3140    FlagsVal = StringSwitch<unsigned>(Flags)
3141    .Case("nzcvq",  0x8) // same as CPSR_f
3142    .Case("g",      0x4) // same as CPSR_s
3143    .Case("nzcvqg", 0xc) // same as CPSR_fs
3144    .Default(~0U);
3145
3146    if (FlagsVal == ~0U) {
3147      if (!Flags.empty())
3148        return MatchOperand_NoMatch;
3149      else
3150        FlagsVal = 8; // No flag
3151    }
3152  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3153    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3154      Flags = "fc";
3155    for (int i = 0, e = Flags.size(); i != e; ++i) {
3156      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3157      .Case("c", 1)
3158      .Case("x", 2)
3159      .Case("s", 4)
3160      .Case("f", 8)
3161      .Default(~0U);
3162
3163      // If the letter is not a valid flag, or the corresponding flag is already
3164      // set (i.e. a letter is present more than once), reject the operand.
3165      if (Flag == ~0U || (FlagsVal & Flag))
3166        return MatchOperand_NoMatch;
3167      FlagsVal |= Flag;
3168    }
3169  } else // No match for special register.
3170    return MatchOperand_NoMatch;
3171
3172  // Special register without flags is NOT equivalent to "fc" flags.
3173  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3174  // two lines would enable gas compatibility at the expense of breaking
3175  // round-tripping.
3176  //
3177  // if (!FlagsVal)
3178  //  FlagsVal = 0x9;
3179
3180  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3181  if (SpecReg == "spsr")
3182    FlagsVal |= 16;
3183
3184  Parser.Lex(); // Eat identifier token.
3185  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3186  return MatchOperand_Success;
3187}
3188
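/// parsePKHImm - Parse the shift operand of the PKH instructions, e.g. the
/// 'lsl #8' in 'pkhbt r0, r1, r2, lsl #8'. 'Op' is the required shift operator
/// ('lsl' or 'asr') and [Low, High] is the allowed immediate range.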
3189ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3190parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3191            int Low, int High) {
3192  const AsmToken &Tok = Parser.getTok();
3193  if (Tok.isNot(AsmToken::Identifier)) {
3194    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3195    return MatchOperand_ParseFail;
3196  }
3197  StringRef ShiftName = Tok.getString();
3198  std::string LowerOp = Op.lower();
3199  std::string UpperOp = Op.upper();
3200  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3201    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3202    return MatchOperand_ParseFail;
3203  }
3204  Parser.Lex(); // Eat shift type token.
3205
3206  // There must be a '#' and a shift amount.
3207  if (Parser.getTok().isNot(AsmToken::Hash) &&
3208      Parser.getTok().isNot(AsmToken::Dollar)) {
3209    Error(Parser.getTok().getLoc(), "'#' expected");
3210    return MatchOperand_ParseFail;
3211  }
3212  Parser.Lex(); // Eat hash token.
3213
3214  const MCExpr *ShiftAmount;
3215  SMLoc Loc = Parser.getTok().getLoc();
3216  if (getParser().ParseExpression(ShiftAmount)) {
3217    Error(Loc, "illegal expression");
3218    return MatchOperand_ParseFail;
3219  }
3220  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3221  if (!CE) {
3222    Error(Loc, "constant expression expected");
3223    return MatchOperand_ParseFail;
3224  }
3225  int Val = CE->getValue();
3226  if (Val < Low || Val > High) {
3227    Error(Loc, "immediate value out of range");
3228    return MatchOperand_ParseFail;
3229  }
3230
3231  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3232
3233  return MatchOperand_Success;
3234}
3235
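/// parseSetEndImm - Parse the endianness specifier of a SETEND instruction,
/// e.g. the 'be' in 'setend be' ('be' encodes as 1, 'le' as 0).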
3236ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3237parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3238  const AsmToken &Tok = Parser.getTok();
3239  SMLoc S = Tok.getLoc();
3240  if (Tok.isNot(AsmToken::Identifier)) {
3241    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3242    return MatchOperand_ParseFail;
3243  }
3244  int Val = StringSwitch<int>(Tok.getString())
3245    .Case("be", 1)
3246    .Case("le", 0)
3247    .Default(-1);
3248  Parser.Lex(); // Eat the token.
3249
3250  if (Val == -1) {
3251    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3252    return MatchOperand_ParseFail;
3253  }
3254  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3255                                                                  getContext()),
3256                                           S, Parser.getTok().getLoc()));
3257  return MatchOperand_Success;
3258}
3259
3260/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3261/// instructions. Legal values are:
3262///     lsl #n  'n' in [0,31]
3263///     asr #n  'n' in [1,32]
3264///             n == 32 encoded as n == 0.
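///     e.g. the 'asr #32' in 'ssat r0, #8, r1, asr #32' encodes as asr #0.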
3265ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3266parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3267  const AsmToken &Tok = Parser.getTok();
3268  SMLoc S = Tok.getLoc();
3269  if (Tok.isNot(AsmToken::Identifier)) {
3270    Error(S, "shift operator 'asr' or 'lsl' expected");
3271    return MatchOperand_ParseFail;
3272  }
3273  StringRef ShiftName = Tok.getString();
3274  bool isASR;
3275  if (ShiftName == "lsl" || ShiftName == "LSL")
3276    isASR = false;
3277  else if (ShiftName == "asr" || ShiftName == "ASR")
3278    isASR = true;
3279  else {
3280    Error(S, "shift operator 'asr' or 'lsl' expected");
3281    return MatchOperand_ParseFail;
3282  }
3283  Parser.Lex(); // Eat the operator.
3284
3285  // A '#' and a shift amount.
3286  if (Parser.getTok().isNot(AsmToken::Hash) &&
3287      Parser.getTok().isNot(AsmToken::Dollar)) {
3288    Error(Parser.getTok().getLoc(), "'#' expected");
3289    return MatchOperand_ParseFail;
3290  }
3291  Parser.Lex(); // Eat hash token.
3292
3293  const MCExpr *ShiftAmount;
3294  SMLoc E = Parser.getTok().getLoc();
3295  if (getParser().ParseExpression(ShiftAmount)) {
3296    Error(E, "malformed shift expression");
3297    return MatchOperand_ParseFail;
3298  }
3299  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3300  if (!CE) {
3301    Error(E, "shift amount must be an immediate");
3302    return MatchOperand_ParseFail;
3303  }
3304
3305  int64_t Val = CE->getValue();
3306  if (isASR) {
3307    // Shift amount must be in [1,32]
3308    if (Val < 1 || Val > 32) {
3309      Error(E, "'asr' shift amount must be in range [1,32]");
3310      return MatchOperand_ParseFail;
3311    }
3312    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3313    if (isThumb() && Val == 32) {
3314      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3315      return MatchOperand_ParseFail;
3316    }
3317    if (Val == 32) Val = 0;
3318  } else {
3319    // Shift amount must be in [0,31]
3320    if (Val < 0 || Val > 31) {
3321      Error(E, "'lsl' shift amount must be in range [0,31]");
3322      return MatchOperand_ParseFail;
3323    }
3324  }
3325
3326  E = Parser.getTok().getLoc();
3327  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3328
3329  return MatchOperand_Success;
3330}
3331
3332/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3333/// of instructions. Legal values are:
3334///     ror #n  'n' in {0, 8, 16, 24}
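///     e.g. the 'ror #16' in 'uxtb r0, r1, ror #16'.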
3335ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3336parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3337  const AsmToken &Tok = Parser.getTok();
3338  SMLoc S = Tok.getLoc();
3339  if (Tok.isNot(AsmToken::Identifier))
3340    return MatchOperand_NoMatch;
3341  StringRef ShiftName = Tok.getString();
3342  if (ShiftName != "ror" && ShiftName != "ROR")
3343    return MatchOperand_NoMatch;
3344  Parser.Lex(); // Eat the operator.
3345
3346  // A '#' and a rotate amount.
3347  if (Parser.getTok().isNot(AsmToken::Hash) &&
3348      Parser.getTok().isNot(AsmToken::Dollar)) {
3349    Error(Parser.getTok().getLoc(), "'#' expected");
3350    return MatchOperand_ParseFail;
3351  }
3352  Parser.Lex(); // Eat hash token.
3353
3354  const MCExpr *ShiftAmount;
3355  SMLoc E = Parser.getTok().getLoc();
3356  if (getParser().ParseExpression(ShiftAmount)) {
3357    Error(E, "malformed rotate expression");
3358    return MatchOperand_ParseFail;
3359  }
3360  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3361  if (!CE) {
3362    Error(E, "rotate amount must be an immediate");
3363    return MatchOperand_ParseFail;
3364  }
3365
3366  int64_t Val = CE->getValue();
3367  // Rotate amount must be in {0, 8, 16, 24}. (0 is an undocumented extension;
3368  // normally, zero is represented in asm by omitting the rotate operand
3369  // entirely.)
3370  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3371    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3372    return MatchOperand_ParseFail;
3373  }
3374
3375  E = Parser.getTok().getLoc();
3376  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3377
3378  return MatchOperand_Success;
3379}
3380
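/// parseBitfield - Parse the lsb/width descriptor used by the bitfield
/// instructions, e.g. the '#4, #8' in 'bfi r0, r1, #4, #8'.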
3381ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3382parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3383  SMLoc S = Parser.getTok().getLoc();
3384  // The bitfield descriptor is really two operands, the LSB and the width.
3385  if (Parser.getTok().isNot(AsmToken::Hash) &&
3386      Parser.getTok().isNot(AsmToken::Dollar)) {
3387    Error(Parser.getTok().getLoc(), "'#' expected");
3388    return MatchOperand_ParseFail;
3389  }
3390  Parser.Lex(); // Eat hash token.
3391
3392  const MCExpr *LSBExpr;
3393  SMLoc E = Parser.getTok().getLoc();
3394  if (getParser().ParseExpression(LSBExpr)) {
3395    Error(E, "malformed immediate expression");
3396    return MatchOperand_ParseFail;
3397  }
3398  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3399  if (!CE) {
3400    Error(E, "'lsb' operand must be an immediate");
3401    return MatchOperand_ParseFail;
3402  }
3403
3404  int64_t LSB = CE->getValue();
3405  // The LSB must be in the range [0,31]
3406  if (LSB < 0 || LSB > 31) {
3407    Error(E, "'lsb' operand must be in the range [0,31]");
3408    return MatchOperand_ParseFail;
3409  }
3410  E = Parser.getTok().getLoc();
3411
3412  // Expect another immediate operand.
3413  if (Parser.getTok().isNot(AsmToken::Comma)) {
3414    Error(Parser.getTok().getLoc(), "too few operands");
3415    return MatchOperand_ParseFail;
3416  }
3417  Parser.Lex(); // Eat comma token.
3418  if (Parser.getTok().isNot(AsmToken::Hash) &&
3419      Parser.getTok().isNot(AsmToken::Dollar)) {
3420    Error(Parser.getTok().getLoc(), "'#' expected");
3421    return MatchOperand_ParseFail;
3422  }
3423  Parser.Lex(); // Eat hash token.
3424
3425  const MCExpr *WidthExpr;
3426  if (getParser().ParseExpression(WidthExpr)) {
3427    Error(E, "malformed immediate expression");
3428    return MatchOperand_ParseFail;
3429  }
3430  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3431  if (!CE) {
3432    Error(E, "'width' operand must be an immediate");
3433    return MatchOperand_ParseFail;
3434  }
3435
3436  int64_t Width = CE->getValue();
3437  // The width must be in the range [1,32-lsb]
3438  if (Width < 1 || Width > 32 - LSB) {
3439    Error(E, "'width' operand must be in the range [1,32-lsb]");
3440    return MatchOperand_ParseFail;
3441  }
3442  E = Parser.getTok().getLoc();
3443
3444  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3445
3446  return MatchOperand_Success;
3447}
3448
3449ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3450parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3451  // Check for a post-index addressing register operand. Specifically:
3452  // postidx_reg := '+' register {, shift}
3453  //              | '-' register {, shift}
3454  //              | register {, shift}
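  // e.g. the '-r2, lsl #2' in 'ldr r0, [r1], -r2, lsl #2'.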
3455
3456  // This method must return MatchOperand_NoMatch without consuming any tokens
3457  // in the case where there is no match, as other alternatives take other
3458  // parse methods.
3459  AsmToken Tok = Parser.getTok();
3460  SMLoc S = Tok.getLoc();
3461  bool haveEaten = false;
3462  bool isAdd = true;
3463  int Reg = -1;
3464  if (Tok.is(AsmToken::Plus)) {
3465    Parser.Lex(); // Eat the '+' token.
3466    haveEaten = true;
3467  } else if (Tok.is(AsmToken::Minus)) {
3468    Parser.Lex(); // Eat the '-' token.
3469    isAdd = false;
3470    haveEaten = true;
3471  }
3472  if (Parser.getTok().is(AsmToken::Identifier))
3473    Reg = tryParseRegister();
3474  if (Reg == -1) {
3475    if (!haveEaten)
3476      return MatchOperand_NoMatch;
3477    Error(Parser.getTok().getLoc(), "register expected");
3478    return MatchOperand_ParseFail;
3479  }
3480  SMLoc E = Parser.getTok().getLoc();
3481
3482  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3483  unsigned ShiftImm = 0;
3484  if (Parser.getTok().is(AsmToken::Comma)) {
3485    Parser.Lex(); // Eat the ','.
3486    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3487      return MatchOperand_ParseFail;
3488  }
3489
3490  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3491                                                  ShiftImm, S, E));
3492
3493  return MatchOperand_Success;
3494}
3495
3496ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3497parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3498  // Check for a post-index addressing register operand. Specifically:
3499  // am3offset := '+' register
3500  //              | '-' register
3501  //              | register
3502  //              | # imm
3503  //              | # + imm
3504  //              | # - imm
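  // e.g. the '#-4' in 'ldrh r0, [r1], #-4' or the 'r2' in 'strh r0, [r1], r2'.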
3505
3506  // This method must return MatchOperand_NoMatch without consuming any tokens
3507  // in the case where there is no match, as other alternatives take other
3508  // parse methods.
3509  AsmToken Tok = Parser.getTok();
3510  SMLoc S = Tok.getLoc();
3511
3512  // Do immediates first, as we always parse those if we have a '#'.
3513  if (Parser.getTok().is(AsmToken::Hash) ||
3514      Parser.getTok().is(AsmToken::Dollar)) {
3515    Parser.Lex(); // Eat the '#'.
3516    // Explicitly look for a '-', as we need to encode negative zero
3517    // differently.
3518    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3519    const MCExpr *Offset;
3520    if (getParser().ParseExpression(Offset))
3521      return MatchOperand_ParseFail;
3522    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3523    if (!CE) {
3524      Error(S, "constant expression expected");
3525      return MatchOperand_ParseFail;
3526    }
3527    SMLoc E = Tok.getLoc();
3528    // Negative zero is encoded as the flag value INT32_MIN.
3529    int32_t Val = CE->getValue();
3530    if (isNegative && Val == 0)
3531      Val = INT32_MIN;
3532
3533    Operands.push_back(
3534      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3535
3536    return MatchOperand_Success;
3537  }
3538
3539
3540  bool haveEaten = false;
3541  bool isAdd = true;
3542  int Reg = -1;
3543  if (Tok.is(AsmToken::Plus)) {
3544    Parser.Lex(); // Eat the '+' token.
3545    haveEaten = true;
3546  } else if (Tok.is(AsmToken::Minus)) {
3547    Parser.Lex(); // Eat the '-' token.
3548    isAdd = false;
3549    haveEaten = true;
3550  }
3551  if (Parser.getTok().is(AsmToken::Identifier))
3552    Reg = tryParseRegister();
3553  if (Reg == -1) {
3554    if (!haveEaten)
3555      return MatchOperand_NoMatch;
3556    Error(Parser.getTok().getLoc(), "register expected");
3557    return MatchOperand_ParseFail;
3558  }
3559  SMLoc E = Parser.getTok().getLoc();
3560
3561  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3562                                                  0, S, E));
3563
3564  return MatchOperand_Success;
3565}
3566
3567/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3568/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3569/// when they refer to multiple MIOperands inside a single one.
3570bool ARMAsmParser::
3571cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3572             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3573  // Rt, Rt2
3574  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3575  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3576  // Create a writeback register dummy placeholder.
3577  Inst.addOperand(MCOperand::CreateReg(0));
3578  // addr
3579  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3580  // pred
3581  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3582  return true;
3583}
3584
3585/// cvtT2StrdPre - Convert parsed operands to MCInst.
3586/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3587/// when they refer to multiple MIOperands inside a single one.
3588bool ARMAsmParser::
3589cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3590             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3591  // Create a writeback register dummy placeholder.
3592  Inst.addOperand(MCOperand::CreateReg(0));
3593  // Rt, Rt2
3594  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3595  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3596  // addr
3597  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3598  // pred
3599  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3600  return true;
3601}
3602
3603/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3604/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3605/// when they refer to multiple MIOperands inside a single one.
3606bool ARMAsmParser::
3607cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3608                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3609  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3610
3611  // Create a writeback register dummy placeholder.
3612  Inst.addOperand(MCOperand::CreateImm(0));
3613
3614  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3615  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3616  return true;
3617}
3618
3619/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3620/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3621/// when they refer to multiple MIOperands inside a single one.
3622bool ARMAsmParser::
3623cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3624                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3625  // Create a writeback register dummy placeholder.
3626  Inst.addOperand(MCOperand::CreateImm(0));
3627  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3628  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3629  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3630  return true;
3631}
3632
3633/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3634/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3635/// when they refer to multiple MIOperands inside a single one.
3636bool ARMAsmParser::
3637cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3638                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3639  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3640
3641  // Create a writeback register dummy placeholder.
3642  Inst.addOperand(MCOperand::CreateImm(0));
3643
3644  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3645  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3646  return true;
3647}
3648
3649/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3650/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3651/// when they refer to multiple MIOperands inside a single one.
3652bool ARMAsmParser::
3653cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3654                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3655  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3656
3657  // Create a writeback register dummy placeholder.
3658  Inst.addOperand(MCOperand::CreateImm(0));
3659
3660  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3661  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3662  return true;
3663}
3664
3665
3666/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3667/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3668/// when they refer to multiple MIOperands inside a single one.
3669bool ARMAsmParser::
3670cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3671                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3672  // Create a writeback register dummy placeholder.
3673  Inst.addOperand(MCOperand::CreateImm(0));
3674  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3675  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3676  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3677  return true;
3678}
3679
3680/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3681/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3682/// when they refer to multiple MIOperands inside a single one.
3683bool ARMAsmParser::
3684cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3685                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3686  // Create a writeback register dummy placeholder.
3687  Inst.addOperand(MCOperand::CreateImm(0));
3688  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3689  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3690  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3691  return true;
3692}
3693
3694/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3695/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3696/// when they refer to multiple MIOperands inside a single one.
3697bool ARMAsmParser::
3698cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3699                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3700  // Create a writeback register dummy placeholder.
3701  Inst.addOperand(MCOperand::CreateImm(0));
3702  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3703  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3704  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3705  return true;
3706}
3707
3708/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3709/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3710/// when they refer to multiple MIOperands inside a single one.
3711bool ARMAsmParser::
3712cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3713                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3714  // Rt
3715  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3716  // Create a writeback register dummy placeholder.
3717  Inst.addOperand(MCOperand::CreateImm(0));
3718  // addr
3719  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3720  // offset
3721  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3722  // pred
3723  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3724  return true;
3725}
3726
3727/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3728/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3729/// when they refer to multiple MIOperands inside a single one.
3730bool ARMAsmParser::
3731cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3732                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3733  // Rt
3734  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3735  // Create a writeback register dummy placeholder.
3736  Inst.addOperand(MCOperand::CreateImm(0));
3737  // addr
3738  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3739  // offset
3740  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3741  // pred
3742  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3743  return true;
3744}
3745
3746/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3747/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3748/// when they refer to multiple MIOperands inside a single one.
3749bool ARMAsmParser::
3750cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3751                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3752  // Create a writeback register dummy placeholder.
3753  Inst.addOperand(MCOperand::CreateImm(0));
3754  // Rt
3755  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3756  // addr
3757  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3758  // offset
3759  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3760  // pred
3761  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3762  return true;
3763}
3764
3765/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3766/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3767/// when they refer to multiple MIOperands inside a single one.
3768bool ARMAsmParser::
3769cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3770                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3771  // Create a writeback register dummy placeholder.
3772  Inst.addOperand(MCOperand::CreateImm(0));
3773  // Rt
3774  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3775  // addr
3776  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3777  // offset
3778  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3779  // pred
3780  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3781  return true;
3782}
3783
3784/// cvtLdrdPre - Convert parsed operands to MCInst.
3785/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3786/// when they refer to multiple MIOperands inside a single one.
3787bool ARMAsmParser::
3788cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3789           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3790  // Rt, Rt2
3791  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3792  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3793  // Create a writeback register dummy placeholder.
3794  Inst.addOperand(MCOperand::CreateImm(0));
3795  // addr
3796  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3797  // pred
3798  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3799  return true;
3800}
3801
3802/// cvtStrdPre - Convert parsed operands to MCInst.
3803/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3804/// when they refer to multiple MIOperands inside a single one.
3805bool ARMAsmParser::
3806cvtStrdPre(MCInst &Inst, unsigned Opcode,
3807           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3808  // Create a writeback register dummy placeholder.
3809  Inst.addOperand(MCOperand::CreateImm(0));
3810  // Rt, Rt2
3811  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3812  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3813  // addr
3814  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3815  // pred
3816  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3817  return true;
3818}
3819
3820/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3821/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3822/// when they refer to multiple MIOperands inside a single one.
3823bool ARMAsmParser::
3824cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3825                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3826  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3827  // Create a writeback register dummy placeholder.
3828  Inst.addOperand(MCOperand::CreateImm(0));
3829  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3830  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3831  return true;
3832}
3833
3834/// cvtThumbMultiply - Convert parsed operands to MCInst.
3835/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3836/// when they refer to multiple MIOperands inside a single one.
3837bool ARMAsmParser::
3838cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3839           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3840  // One of the source operands must be the same register as the destination
3841  // operand.
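  // e.g. 'muls r0, r1, r0' is accepted, but 'muls r0, r1, r2' is rejected.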
3842  if (Operands.size() == 6 &&
3843      (((ARMOperand*)Operands[3])->getReg() !=
3844       ((ARMOperand*)Operands[5])->getReg()) &&
3845      (((ARMOperand*)Operands[3])->getReg() !=
3846       ((ARMOperand*)Operands[4])->getReg())) {
3847    Error(Operands[3]->getStartLoc(),
3848          "destination register must match source register");
3849    return false;
3850  }
3851  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3852  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3853  // If we have a three-operand form, make sure to set Rn to be the operand
3854  // that isn't the same as Rd.
3855  unsigned RegOp = 4;
3856  if (Operands.size() == 6 &&
3857      ((ARMOperand*)Operands[4])->getReg() ==
3858        ((ARMOperand*)Operands[3])->getReg())
3859    RegOp = 5;
3860  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3861  Inst.addOperand(Inst.getOperand(0));
3862  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3863
3864  return true;
3865}
3866
3867bool ARMAsmParser::
3868cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3869              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3870  // Vd
3871  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3872  // Create a writeback register dummy placeholder.
3873  Inst.addOperand(MCOperand::CreateImm(0));
3874  // Vn
3875  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3876  // pred
3877  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3878  return true;
3879}
3880
3881bool ARMAsmParser::
3882cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3883                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3884  // Vd
3885  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3886  // Create a writeback register dummy placeholder.
3887  Inst.addOperand(MCOperand::CreateImm(0));
3888  // Vn
3889  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3890  // Vm
3891  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3892  // pred
3893  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3894  return true;
3895}
3896
3897bool ARMAsmParser::
3898cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3899              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3900  // Create a writeback register dummy placeholder.
3901  Inst.addOperand(MCOperand::CreateImm(0));
3902  // Vn
3903  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3904  // Vt
3905  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3906  // pred
3907  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3908  return true;
3909}
3910
3911bool ARMAsmParser::
3912cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3913                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3914  // Create a writeback register dummy placeholder.
3915  Inst.addOperand(MCOperand::CreateImm(0));
3916  // Vn
3917  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3918  // Vm
3919  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3920  // Vt
3921  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3922  // pred
3923  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3924  return true;
3925}
3926
3927/// Parse an ARM memory expression. Return false on success, or true on error
3928/// (an error has been reported).  The first token must be a '[' when called.
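/// e.g. '[r0]', '[r0, #4]!', '[r0, r1, lsl #2]' or '[r0, :128]'.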
3929bool ARMAsmParser::
3930parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3931  SMLoc S, E;
3932  assert(Parser.getTok().is(AsmToken::LBrac) &&
3933         "Token is not a Left Bracket");
3934  S = Parser.getTok().getLoc();
3935  Parser.Lex(); // Eat left bracket token.
3936
3937  const AsmToken &BaseRegTok = Parser.getTok();
3938  int BaseRegNum = tryParseRegister();
3939  if (BaseRegNum == -1)
3940    return Error(BaseRegTok.getLoc(), "register expected");
3941
3942  // The next token must either be a comma or a closing bracket.
3943  const AsmToken &Tok = Parser.getTok();
3944  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3945    return Error(Tok.getLoc(), "malformed memory operand");
3946
3947  if (Tok.is(AsmToken::RBrac)) {
3948    E = Tok.getLoc();
3949    Parser.Lex(); // Eat right bracket token.
3950
3951    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3952                                             0, 0, false, S, E));
3953
3954    // If there's a pre-indexing writeback marker, '!', just add it as a token
3955    // operand. It's rather odd, but syntactically valid.
3956    if (Parser.getTok().is(AsmToken::Exclaim)) {
3957      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3958      Parser.Lex(); // Eat the '!'.
3959    }
3960
3961    return false;
3962  }
3963
3964  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
3965  Parser.Lex(); // Eat the comma.
3966
3967  // If we have a ':', it's an alignment specifier.
3968  if (Parser.getTok().is(AsmToken::Colon)) {
3969    Parser.Lex(); // Eat the ':'.
3970    E = Parser.getTok().getLoc();
3971
3972    const MCExpr *Expr;
3973    if (getParser().ParseExpression(Expr))
3974     return true;
3975
3976    // The expression has to be a constant. Memory references with relocations
3977    // don't come through here, as they use the <label> forms of the relevant
3978    // instructions.
3979    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3980    if (!CE)
3981      return Error (E, "constant expression expected");
3982
3983    unsigned Align = 0;
3984    switch (CE->getValue()) {
3985    default:
3986      return Error(E,
3987                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
3988    case 16:  Align = 2; break;
3989    case 32:  Align = 4; break;
3990    case 64:  Align = 8; break;
3991    case 128: Align = 16; break;
3992    case 256: Align = 32; break;
3993    }
3994
3995    // Now we should have the closing ']'
3996    E = Parser.getTok().getLoc();
3997    if (Parser.getTok().isNot(AsmToken::RBrac))
3998      return Error(E, "']' expected");
3999    Parser.Lex(); // Eat right bracket token.
4000
4001    // Don't worry about range checking the value here. That's handled by
4002    // the is*() predicates.
4003    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4004                                             ARM_AM::no_shift, 0, Align,
4005                                             false, S, E));
4006
4007    // If there's a pre-indexing writeback marker, '!', just add it as a token
4008    // operand.
4009    if (Parser.getTok().is(AsmToken::Exclaim)) {
4010      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4011      Parser.Lex(); // Eat the '!'.
4012    }
4013
4014    return false;
4015  }
4016
4017  // If we have a '#', it's an immediate offset, else assume it's a register
4018  // offset. Be friendly and also accept a plain integer (without a leading
4019  // hash) for gas compatibility.
4020  if (Parser.getTok().is(AsmToken::Hash) ||
4021      Parser.getTok().is(AsmToken::Dollar) ||
4022      Parser.getTok().is(AsmToken::Integer)) {
4023    if (Parser.getTok().isNot(AsmToken::Integer))
4024      Parser.Lex(); // Eat the '#'.
4025    E = Parser.getTok().getLoc();
4026
4027    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4028    const MCExpr *Offset;
4029    if (getParser().ParseExpression(Offset))
4030     return true;
4031
4032    // The expression has to be a constant. Memory references with relocations
4033    // don't come through here, as they use the <label> forms of the relevant
4034    // instructions.
4035    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4036    if (!CE)
4037      return Error (E, "constant expression expected");
4038
4039    // If the constant was #-0, represent it as INT32_MIN.
4040    int32_t Val = CE->getValue();
4041    if (isNegative && Val == 0)
4042      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4043
4044    // Now we should have the closing ']'
4045    E = Parser.getTok().getLoc();
4046    if (Parser.getTok().isNot(AsmToken::RBrac))
4047      return Error(E, "']' expected");
4048    Parser.Lex(); // Eat right bracket token.
4049
4050    // Don't worry about range checking the value here. That's handled by
4051    // the is*() predicates.
4052    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4053                                             ARM_AM::no_shift, 0, 0,
4054                                             false, S, E));
4055
4056    // If there's a pre-indexing writeback marker, '!', just add it as a token
4057    // operand.
4058    if (Parser.getTok().is(AsmToken::Exclaim)) {
4059      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4060      Parser.Lex(); // Eat the '!'.
4061    }
4062
4063    return false;
4064  }
4065
4066  // The register offset is optionally preceded by a '+' or '-'
4067  bool isNegative = false;
4068  if (Parser.getTok().is(AsmToken::Minus)) {
4069    isNegative = true;
4070    Parser.Lex(); // Eat the '-'.
4071  } else if (Parser.getTok().is(AsmToken::Plus)) {
4072    // Nothing to do.
4073    Parser.Lex(); // Eat the '+'.
4074  }
4075
4076  E = Parser.getTok().getLoc();
4077  int OffsetRegNum = tryParseRegister();
4078  if (OffsetRegNum == -1)
4079    return Error(E, "register expected");
4080
4081  // If there's a shift operator, handle it.
4082  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4083  unsigned ShiftImm = 0;
4084  if (Parser.getTok().is(AsmToken::Comma)) {
4085    Parser.Lex(); // Eat the ','.
4086    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4087      return true;
4088  }
4089
4090  // Now we should have the closing ']'
4091  E = Parser.getTok().getLoc();
4092  if (Parser.getTok().isNot(AsmToken::RBrac))
4093    return Error(E, "']' expected");
4094  Parser.Lex(); // Eat right bracket token.
4095
4096  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4097                                           ShiftType, ShiftImm, 0, isNegative,
4098                                           S, E));
4099
4100  // If there's a pre-indexing writeback marker, '!', just add it as a token
4101  // operand.
4102  if (Parser.getTok().is(AsmToken::Exclaim)) {
4103    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4104    Parser.Lex(); // Eat the '!'.
4105  }
4106
4107  return false;
4108}
4109
4110/// parseMemRegOffsetShift - one of these two:
4111///   ( lsl | lsr | asr | ror ) , # shift_amount
4112///   rrx
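///   e.g. the 'lsl #2' in '[r0, r1, lsl #2]' or the 'rrx' in '[r0, r1, rrx]'.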
4113/// Returns false if a shift was successfully parsed; returns true otherwise.
4114bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4115                                          unsigned &Amount) {
4116  SMLoc Loc = Parser.getTok().getLoc();
4117  const AsmToken &Tok = Parser.getTok();
4118  if (Tok.isNot(AsmToken::Identifier))
4119    return true;
4120  StringRef ShiftName = Tok.getString();
4121  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4122      ShiftName == "asl" || ShiftName == "ASL")
4123    St = ARM_AM::lsl;
4124  else if (ShiftName == "lsr" || ShiftName == "LSR")
4125    St = ARM_AM::lsr;
4126  else if (ShiftName == "asr" || ShiftName == "ASR")
4127    St = ARM_AM::asr;
4128  else if (ShiftName == "ror" || ShiftName == "ROR")
4129    St = ARM_AM::ror;
4130  else if (ShiftName == "rrx" || ShiftName == "RRX")
4131    St = ARM_AM::rrx;
4132  else
4133    return Error(Loc, "illegal shift operator");
4134  Parser.Lex(); // Eat shift type token.
4135
4136  // rrx stands alone.
4137  Amount = 0;
4138  if (St != ARM_AM::rrx) {
4139    Loc = Parser.getTok().getLoc();
4140    // A '#' and a shift amount.
4141    const AsmToken &HashTok = Parser.getTok();
4142    if (HashTok.isNot(AsmToken::Hash) &&
4143        HashTok.isNot(AsmToken::Dollar))
4144      return Error(HashTok.getLoc(), "'#' expected");
4145    Parser.Lex(); // Eat hash token.
4146
4147    const MCExpr *Expr;
4148    if (getParser().ParseExpression(Expr))
4149      return true;
4150    // Range check the immediate.
4151    // lsl, ror: 0 <= imm <= 31
4152    // lsr, asr: 0 <= imm <= 32
4153    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4154    if (!CE)
4155      return Error(Loc, "shift amount must be an immediate");
4156    int64_t Imm = CE->getValue();
4157    if (Imm < 0 ||
4158        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4159        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4160      return Error(Loc, "immediate shift value out of range");
4161    Amount = Imm;
4162  }
4163
4164  return false;
4165}
4166
4167/// parseFPImm - A floating point immediate expression operand.
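/// e.g. the '#1.0' in 'vmov.f32 s0, #1.0' or the '#-2.5' in 'vmov.f64 d0, #-2.5'.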
4168ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4169parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4170  SMLoc S = Parser.getTok().getLoc();
4171
4172  if (Parser.getTok().isNot(AsmToken::Hash) &&
4173      Parser.getTok().isNot(AsmToken::Dollar))
4174    return MatchOperand_NoMatch;
4175
4176  // Disambiguate the VMOV forms that can accept an FP immediate.
4177  // vmov.f32 <sreg>, #imm
4178  // vmov.f64 <dreg>, #imm
4179  // vmov.f32 <dreg>, #imm  @ vector f32x2
4180  // vmov.f32 <qreg>, #imm  @ vector f32x4
4181  //
4182  // There are also the NEON VMOV instructions which expect an
4183  // integer constant. Make sure we don't try to parse an FPImm
4184  // for these:
4185  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4186  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4187  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4188                           TyOp->getToken() != ".f64"))
4189    return MatchOperand_NoMatch;
4190
4191  Parser.Lex(); // Eat the '#'.
4192
4193  // Handle negation, as that still comes through as a separate token.
4194  bool isNegative = false;
4195  if (Parser.getTok().is(AsmToken::Minus)) {
4196    isNegative = true;
4197    Parser.Lex();
4198  }
4199  const AsmToken &Tok = Parser.getTok();
4200  if (Tok.is(AsmToken::Real)) {
4201    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4202    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4203    // If we had a '-' in front, toggle the sign bit.
4204    IntVal ^= (uint64_t)isNegative << 63;
4205    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4206    Parser.Lex(); // Eat the token.
4207    if (Val == -1) {
4208      TokError("floating point value out of range");
4209      return MatchOperand_ParseFail;
4210    }
4211    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4212    return MatchOperand_Success;
4213  }
4214  if (Tok.is(AsmToken::Integer)) {
4215    int64_t Val = Tok.getIntVal();
4216    Parser.Lex(); // Eat the token.
4217    if (Val > 255 || Val < 0) {
4218      TokError("encoded floating point value out of range");
4219      return MatchOperand_ParseFail;
4220    }
4221    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4222    return MatchOperand_Success;
4223  }
4224
4225  TokError("invalid floating point immediate");
4226  return MatchOperand_ParseFail;
4227}
4228/// Parse an ARM instruction operand.  For now this parses the operand regardless
4229/// of the mnemonic.
4230bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4231                                StringRef Mnemonic) {
4232  SMLoc S, E;
4233
4234  // Check if the current operand has a custom associated parser, if so, try to
4235  // custom parse the operand, or fallback to the general approach.
4236  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4237  if (ResTy == MatchOperand_Success)
4238    return false;
4239  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4240  // there was a match, but an error occurred, in which case, just return that
4241  // the operand parsing failed.
4242  if (ResTy == MatchOperand_ParseFail)
4243    return true;
4244
4245  switch (getLexer().getKind()) {
4246  default:
4247    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4248    return true;
4249  case AsmToken::Identifier: {
4250    // If this is VMRS, check for the apsr_nzcv operand.
4251    if (!tryParseRegisterWithWriteBack(Operands))
4252      return false;
4253    int Res = tryParseShiftRegister(Operands);
4254    if (Res == 0) // success
4255      return false;
4256    else if (Res == -1) // irrecoverable error
4257      return true;
4258    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4259      S = Parser.getTok().getLoc();
4260      Parser.Lex();
4261      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4262      return false;
4263    }
4264
4265    // Fall through for the Identifier case that is not a register or a
4266    // special name.
4267  }
4268  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4269  case AsmToken::Integer: // things like 1f and 2b as branch targets
4270  case AsmToken::String:  // quoted label names.
4271  case AsmToken::Dot: {   // . as a branch target
4272    // This was not a register so parse other operands that start with an
4273    // identifier (like labels) as expressions and create them as immediates.
4274    const MCExpr *IdVal;
4275    S = Parser.getTok().getLoc();
4276    if (getParser().ParseExpression(IdVal))
4277      return true;
4278    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4279    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4280    return false;
4281  }
4282  case AsmToken::LBrac:
4283    return parseMemory(Operands);
4284  case AsmToken::LCurly:
4285    return parseRegisterList(Operands);
4286  case AsmToken::Dollar:
4287  case AsmToken::Hash: {
4288    // #42 -> immediate.
4289    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4290    S = Parser.getTok().getLoc();
4291    Parser.Lex();
4292    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4293    const MCExpr *ImmVal;
4294    if (getParser().ParseExpression(ImmVal))
4295      return true;
4296    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4297    if (CE) {
4298      int32_t Val = CE->getValue();
4299      if (isNegative && Val == 0)
4300        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4301    }
4302    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4303    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4304    return false;
4305  }
4306  case AsmToken::Colon: {
4307    // ":lower16:" and ":upper16:" expression prefixes
4308    // FIXME: Check it's an expression prefix,
4309    // e.g. (FOO - :lower16:BAR) isn't legal.
4310    ARMMCExpr::VariantKind RefKind;
4311    if (parsePrefix(RefKind))
4312      return true;
4313
4314    const MCExpr *SubExprVal;
4315    if (getParser().ParseExpression(SubExprVal))
4316      return true;
4317
4318    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4319                                                   getContext());
4320    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4321    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4322    return false;
4323  }
4324  }
4325}
4326
4327// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e.
4328//  :lower16: and :upper16:.
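//  e.g. the ':lower16:' in 'movw r0, :lower16:_foo'.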
4329bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4330  RefKind = ARMMCExpr::VK_ARM_None;
4331
4332  // :lower16: and :upper16: modifiers
4333  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4334  Parser.Lex(); // Eat ':'
4335
4336  if (getLexer().isNot(AsmToken::Identifier)) {
4337    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4338    return true;
4339  }
4340
4341  StringRef IDVal = Parser.getTok().getIdentifier();
4342  if (IDVal == "lower16") {
4343    RefKind = ARMMCExpr::VK_ARM_LO16;
4344  } else if (IDVal == "upper16") {
4345    RefKind = ARMMCExpr::VK_ARM_HI16;
4346  } else {
4347    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4348    return true;
4349  }
4350  Parser.Lex();
4351
4352  if (getLexer().isNot(AsmToken::Colon)) {
4353    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4354    return true;
4355  }
4356  Parser.Lex(); // Eat the last ':'
4357  return false;
4358}
4359
4360/// \brief Given a mnemonic, split out possible predication code and carry
4361/// setting letters to form a canonical mnemonic and flags.
4362//
4363// FIXME: Would be nice to autogen this.
4364// FIXME: This is a bit of a maze of special cases.
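// For example, "addseq" splits into the canonical mnemonic "add" with
// PredicationCode == ARMCC::EQ and CarrySetting == true.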
4365StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4366                                      unsigned &PredicationCode,
4367                                      bool &CarrySetting,
4368                                      unsigned &ProcessorIMod,
4369                                      StringRef &ITMask) {
4370  PredicationCode = ARMCC::AL;
4371  CarrySetting = false;
4372  ProcessorIMod = 0;
4373
4374  // Ignore some mnemonics we know aren't predicated forms.
4375  //
4376  // FIXME: Would be nice to autogen this.
4377  if ((Mnemonic == "movs" && isThumb()) ||
4378      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4379      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4380      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4381      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4382      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4383      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4384      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4385      Mnemonic == "fmuls")
4386    return Mnemonic;
4387
4388  // First, split out any predication code. Ignore mnemonics we know aren't
4389  // predicated but do have a carry-set and so weren't caught above.
4390  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4391      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4392      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4393      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4394    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4395      .Case("eq", ARMCC::EQ)
4396      .Case("ne", ARMCC::NE)
4397      .Case("hs", ARMCC::HS)
4398      .Case("cs", ARMCC::HS)
4399      .Case("lo", ARMCC::LO)
4400      .Case("cc", ARMCC::LO)
4401      .Case("mi", ARMCC::MI)
4402      .Case("pl", ARMCC::PL)
4403      .Case("vs", ARMCC::VS)
4404      .Case("vc", ARMCC::VC)
4405      .Case("hi", ARMCC::HI)
4406      .Case("ls", ARMCC::LS)
4407      .Case("ge", ARMCC::GE)
4408      .Case("lt", ARMCC::LT)
4409      .Case("gt", ARMCC::GT)
4410      .Case("le", ARMCC::LE)
4411      .Case("al", ARMCC::AL)
4412      .Default(~0U);
4413    if (CC != ~0U) {
4414      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4415      PredicationCode = CC;
4416    }
4417  }
4418
4419  // Next, determine if we have a carry setting bit. We explicitly ignore all
4420  // the instructions we know end in 's'.
4421  if (Mnemonic.endswith("s") &&
4422      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4423        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4424        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4425        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4426        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4427        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4428        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4429        Mnemonic == "fmuls" ||
4430        (Mnemonic == "movs" && isThumb()))) {
4431    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4432    CarrySetting = true;
4433  }
4434
4435  // The "cps" instruction can have a interrupt mode operand which is glued into
4436  // the mnemonic. Check if this is the case, split it and parse the imod op
4437  if (Mnemonic.startswith("cps")) {
4438    // Split out any imod code.
4439    unsigned IMod =
4440      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4441      .Case("ie", ARM_PROC::IE)
4442      .Case("id", ARM_PROC::ID)
4443      .Default(~0U);
4444    if (IMod != ~0U) {
4445      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4446      ProcessorIMod = IMod;
4447    }
4448  }
4449
4450  // The "it" instruction has the condition mask on the end of the mnemonic.
4451  if (Mnemonic.startswith("it")) {
4452    ITMask = Mnemonic.slice(2, Mnemonic.size());
4453    Mnemonic = Mnemonic.slice(0, 2);
4454  }
4455
4456  return Mnemonic;
4457}
4458
4459/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4460/// inclusion of carry set or predication code operands.
4461//
4462// FIXME: It would be nice to autogen this.
4463void ARMAsmParser::
4464getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4465                      bool &CanAcceptPredicationCode) {
4466  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4467      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4468      Mnemonic == "add" || Mnemonic == "adc" ||
4469      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4470      Mnemonic == "orr" || Mnemonic == "mvn" ||
4471      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4472      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4473      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4474                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4475                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4476    CanAcceptCarrySet = true;
4477  } else
4478    CanAcceptCarrySet = false;
4479
4480  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4481      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4482      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4483      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4484      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4485      (Mnemonic == "clrex" && !isThumb()) ||
4486      (Mnemonic == "nop" && isThumbOne()) ||
4487      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4488        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4489        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4490      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4491       !isThumb()) ||
4492      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4493    CanAcceptPredicationCode = false;
4494  } else
4495    CanAcceptPredicationCode = true;
4496
4497  if (isThumb()) {
4498    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4499        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4500      CanAcceptPredicationCode = false;
4501  }
4502}
4503
4504bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4505                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
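  // Note on the operand layout assumed by the checks below (no width-suffix
  // tokens): Operands[0] is the mnemonic token, Operands[1] the cc_out
  // register (zero unless an 's' suffix was written), Operands[2] the
  // predication code, and the explicitly written operands start at index 3.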
4506  // FIXME: This is all horribly hacky. We really need a better way to deal
4507  // with optional operands like this in the matcher table.
4508
4509  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4510  // another does not. Specifically, the MOVW instruction does not. So we
4511  // special case it here and remove the defaulted (non-setting) cc_out
4512  // operand if that's the instruction we're trying to match.
4513  //
4514  // We do this as post-processing of the explicit operands rather than just
4515  // conditionally adding the cc_out in the first place because we need
4516  // to check the type of the parsed immediate operand.
4517  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4518      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4519      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4520      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4521    return true;
4522
4523  // Register-register 'add' for thumb does not have a cc_out operand
4524  // when there are only two register operands.
4525  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4526      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4527      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4528      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4529    return true;
4530  // Register-register 'add' for thumb does not have a cc_out operand
4531  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4532  // have to check the immediate range here since Thumb2 has a variant
4533  // that can handle a different range and has a cc_out operand.
4534  if (((isThumb() && Mnemonic == "add") ||
4535       (isThumbTwo() && Mnemonic == "sub")) &&
4536      Operands.size() == 6 &&
4537      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4538      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4539      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4540      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4541      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4542       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4543    return true;
4544  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4545  // imm0_4095 variant. That's the least-preferred variant when
4546  // selecting via the generic "add" mnemonic, so to know that we
4547  // should remove the cc_out operand, we have to explicitly check that
4548  // it's not one of the other variants. Ugh.
4549  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4550      Operands.size() == 6 &&
4551      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4552      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4553      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4554    // Nest conditions rather than one big 'if' statement for readability.
4555    //
4556    // If either register is a high reg, it's either one of the SP
4557    // variants (handled above) or a 32-bit encoding, so we just
4558    // check against T3.
4559    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4560         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4561        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4562      return false;
4563    // If both registers are low, we're in an IT block, and the immediate is
4564    // in range, we should use encoding T1 instead, which has a cc_out.
4565    if (inITBlock() &&
4566        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4567        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4568        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4569      return false;
4570
4571    // Otherwise, we use encoding T4, which does not have a cc_out
4572    // operand.
4573    return true;
4574  }
4575
4576  // The thumb2 multiply instruction doesn't have a CCOut register, so
4577  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4578  // use the 16-bit encoding or not.
4579  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4580      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4581      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4582      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4583      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4584      // If the registers aren't low regs, the destination reg isn't the
4585      // same as one of the source regs, or the cc_out operand is zero
4586      // outside of an IT block, we have to use the 32-bit encoding, so
4587      // remove the cc_out operand.
4588      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4589       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4590       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4591       !inITBlock() ||
4592       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4593        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4594        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4595        static_cast<ARMOperand*>(Operands[4])->getReg())))
4596    return true;
4597
4598  // Also check the 'mul' syntax variant that doesn't specify an explicit
4599  // destination register.
4600  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4601      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4602      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4603      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4604      // If the registers aren't low regs or the cc_out operand is zero
4605      // outside of an IT block, we have to use the 32-bit encoding, so
4606      // remove the cc_out operand.
4607      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4608       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4609       !inITBlock()))
4610    return true;
4611
4614  // Register-register 'add/sub' for thumb does not have a cc_out operand
4615  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4616  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4617  // right, this will result in better diagnostics (which operand is off)
4618  // anyway.
4619  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4620      (Operands.size() == 5 || Operands.size() == 6) &&
4621      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4622      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4623      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4624    return true;
4625
4626  return false;
4627}
4628
4629static bool isDataTypeToken(StringRef Tok) {
4630  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4631    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4632    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4633    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4634    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4635    Tok == ".f" || Tok == ".d";
4636}
4637
4638// FIXME: This bit should probably be handled via an explicit match class
4639// in the .td files that matches the suffix instead of having it be
4640// a literal string token the way it is now.
4641static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4642  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4643}
4644
4645static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4646/// Parse an arm instruction mnemonic followed by its operands.
4647bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4648                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4649  // Apply mnemonic aliases before doing anything else, as the destination
4650  // mnemonic may include suffixes and we want to handle them normally.
4651  // The generic tblgen'erated code does this later, at the start of
4652  // MatchInstructionImpl(), but that's too late for aliases that include
4653  // any sort of suffix.
4654  unsigned AvailableFeatures = getAvailableFeatures();
4655  applyMnemonicAliases(Name, AvailableFeatures);
4656
4657  // First check for the ARM-specific .req directive.
4658  if (Parser.getTok().is(AsmToken::Identifier) &&
4659      Parser.getTok().getIdentifier() == ".req") {
4660    parseDirectiveReq(Name, NameLoc);
4661    // We always return 'error' for this, as we're done with this
4662    // statement and don't need to match the instruction.
4663    return true;
4664  }
4665
4666  // Create the leading tokens for the mnemonic, split by '.' characters.
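  // e.g. for "vadd.i32" the leading token is "vadd"; the ".i32" suffix is
  // handled below along with any other '.'-separated pieces.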
4667  size_t Start = 0, Next = Name.find('.');
4668  StringRef Mnemonic = Name.slice(Start, Next);
4669
4670  // Split out the predication code and carry setting flag from the mnemonic.
4671  unsigned PredicationCode;
4672  unsigned ProcessorIMod;
4673  bool CarrySetting;
4674  StringRef ITMask;
4675  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4676                           ProcessorIMod, ITMask);
4677
4678  // In Thumb1, only the branch (B) instruction can be predicated.
4679  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4680    Parser.EatToEndOfStatement();
4681    return Error(NameLoc, "conditional execution not supported in Thumb1");
4682  }
4683
4684  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4685
4686  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4687  // is the mask as it will be for the IT encoding if the conditional
4688  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4689  // where the conditional bit0 is zero, the instruction post-processing
4690  // will adjust the mask accordingly.
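  // e.g. "itet ne" has ITMask "et", giving Mask == 0b0110 here: 'e' -> 0,
  // 't' -> 1, followed by the terminating 1 and a trailing 0.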
4691  if (Mnemonic == "it") {
4692    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4693    if (ITMask.size() > 3) {
4694      Parser.EatToEndOfStatement();
4695      return Error(Loc, "too many conditions on IT instruction");
4696    }
4697    unsigned Mask = 8;
4698    for (unsigned i = ITMask.size(); i != 0; --i) {
4699      char pos = ITMask[i - 1];
4700      if (pos != 't' && pos != 'e') {
4701        Parser.EatToEndOfStatement();
4702        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4703      }
4704      Mask >>= 1;
4705      if (ITMask[i - 1] == 't')
4706        Mask |= 8;
4707    }
4708    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4709  }
4710
4711  // FIXME: This is all a pretty gross hack. We should automatically handle
4712  // optional operands like this via tblgen.
4713
4714  // Next, add the CCOut and ConditionCode operands, if needed.
4715  //
4716  // For mnemonics which can ever incorporate a carry setting bit or predication
4717  // code, our matching model involves us always generating CCOut and
4718  // ConditionCode operands to match the mnemonic "as written" and then we let
4719  // the matcher deal with finding the right instruction or generating an
4720  // appropriate error.
4721  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4722  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4723
4724  // If we had a carry-set on an instruction that can't do that, issue an
4725  // error.
4726  if (!CanAcceptCarrySet && CarrySetting) {
4727    Parser.EatToEndOfStatement();
4728    return Error(NameLoc, "instruction '" + Mnemonic +
4729                 "' can not set flags, but 's' suffix specified");
4730  }
4731  // If we had a predication code on an instruction that can't do that, issue an
4732  // error.
4733  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4734    Parser.EatToEndOfStatement();
4735    return Error(NameLoc, "instruction '" + Mnemonic +
4736                 "' is not predicable, but condition code specified");
4737  }
4738
4739  // Add the carry setting operand, if necessary.
4740  if (CanAcceptCarrySet) {
4741    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4742    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4743                                               Loc));
4744  }
4745
4746  // Add the predication code operand, if necessary.
4747  if (CanAcceptPredicationCode) {
4748    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4749                                      CarrySetting);
4750    Operands.push_back(ARMOperand::CreateCondCode(
4751                         ARMCC::CondCodes(PredicationCode), Loc));
4752  }
4753
4754  // Add the processor imod operand, if necessary.
4755  if (ProcessorIMod) {
4756    Operands.push_back(ARMOperand::CreateImm(
4757          MCConstantExpr::Create(ProcessorIMod, getContext()),
4758                                 NameLoc, NameLoc));
4759  }
4760
4761  // Add the remaining tokens in the mnemonic.
4762  while (Next != StringRef::npos) {
4763    Start = Next;
4764    Next = Name.find('.', Start + 1);
4765    StringRef ExtraToken = Name.slice(Start, Next);
4766
4767    // Some NEON instructions have an optional datatype suffix that is
4768    // completely ignored. Check for that.
4769    if (isDataTypeToken(ExtraToken) &&
4770        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4771      continue;
4772
4773    if (ExtraToken != ".n") {
4774      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4775      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4776    }
4777  }
4778
4779  // Read the remaining operands.
4780  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4781    // Read the first operand.
4782    if (parseOperand(Operands, Mnemonic)) {
4783      Parser.EatToEndOfStatement();
4784      return true;
4785    }
4786
4787    while (getLexer().is(AsmToken::Comma)) {
4788      Parser.Lex();  // Eat the comma.
4789
4790      // Parse and remember the operand.
4791      if (parseOperand(Operands, Mnemonic)) {
4792        Parser.EatToEndOfStatement();
4793        return true;
4794      }
4795    }
4796  }
4797
4798  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4799    SMLoc Loc = getLexer().getLoc();
4800    Parser.EatToEndOfStatement();
4801    return Error(Loc, "unexpected token in argument list");
4802  }
4803
4804  Parser.Lex(); // Consume the EndOfStatement
4805
4806  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4807  // do and don't have a cc_out optional-def operand. With some spot-checks
4808  // of the operand list, we can figure out which variant we're trying to
4809  // parse and adjust accordingly before actually matching. We shouldn't ever
4810  // try to remove a cc_out operand that was explicitly set on the
4811  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4812  // table driven matcher doesn't fit well with the ARM instruction set.
4813  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4814    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4815    Operands.erase(Operands.begin() + 1);
4816    delete Op;
4817  }
4818
4819  // ARM mode 'blx' needs special handling, as the register operand version
4820  // is predicable, but the label operand version is not. So, we can't rely
4821  // on the Mnemonic based checking to correctly figure out when to put
4822  // a k_CondCode operand in the list. If we're trying to match the label
4823  // version, remove the k_CondCode operand here.
4824  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4825      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4826    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4827    Operands.erase(Operands.begin() + 1);
4828    delete Op;
4829  }
4830
4831  // The vector-compare-to-zero instructions have a literal token "#0" at
4832  // the end that arrives here as an immediate operand. Convert it to a
4833  // token to play nicely with the matcher.
4834  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4835      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4836      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4837    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4838    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4839    if (CE && CE->getValue() == 0) {
4840      Operands.erase(Operands.begin() + 5);
4841      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4842      delete Op;
4843    }
4844  }
4845  // VCMP{E} does the same thing, but with a different operand count.
4846  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4847      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4848    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4849    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4850    if (CE && CE->getValue() == 0) {
4851      Operands.erase(Operands.begin() + 4);
4852      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4853      delete Op;
4854    }
4855  }
4856  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4857  // end. Convert it to a token here. Take care not to convert those
4858  // that should hit the Thumb2 encoding.
4859  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4860      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4861      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4862      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4863    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4864    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4865    if (CE && CE->getValue() == 0 &&
4866        (isThumbOne() ||
4867         // The cc_out operand matches the IT block.
4868         ((inITBlock() != CarrySetting) &&
4869         // Neither register operand is a high register.
4870         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4871          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4872      Operands.erase(Operands.begin() + 5);
4873      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4874      delete Op;
4875    }
4876  }
4877
4878  return false;
4879}
4880
4881// Validate context-sensitive operand constraints.
4882
4883// Return 'true' if the register list contains non-low GPR registers (other
4884// than HiReg), 'false' otherwise. If Reg is in the register list, set
4885// 'containsReg' to true.
4886static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4887                                 unsigned HiReg, bool &containsReg) {
4888  containsReg = false;
4889  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4890    unsigned OpReg = Inst.getOperand(i).getReg();
4891    if (OpReg == Reg)
4892      containsReg = true;
4893    // Anything other than a low register isn't legal here.
4894    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4895      return true;
4896  }
4897  return false;
4898}
4899
4900// Check if the specified register is in the register list of the inst,
4901// starting at the indicated operand number.
4902static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4903  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4904    unsigned OpReg = Inst.getOperand(i).getReg();
4905    if (OpReg == Reg)
4906      return true;
4907  }
4908  return false;
4909}
4910
4911// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4912// the ARMInsts array) instead. Getting that here requires awkward
4913// API changes, though. Better way?
4914namespace llvm {
4915extern const MCInstrDesc ARMInsts[];
4916}
4917static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4918  return ARMInsts[Opcode];
4919}
4920
4921// FIXME: We would really like to be able to tablegen'erate this.
4922bool ARMAsmParser::
4923validateInstruction(MCInst &Inst,
4924                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4925  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4926  SMLoc Loc = Operands[0]->getStartLoc();
4927  // Check the IT block state first.
4928  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4929  // being allowed in IT blocks, but not being predicable.  It just always
4930  // executes.
4931  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
4932    unsigned bit = 1;
4933    if (ITState.FirstCond)
4934      ITState.FirstCond = false;
4935    else
4936      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
4937    // The instruction must be predicable.
4938    if (!MCID.isPredicable())
4939      return Error(Loc, "instructions in IT block must be predicable");
4940    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
4941    unsigned ITCond = bit ? ITState.Cond :
4942      ARMCC::getOppositeCondition(ITState.Cond);
4943    if (Cond != ITCond) {
4944      // Find the condition code Operand to get its SMLoc information.
4945      SMLoc CondLoc;
4946      for (unsigned i = 1; i < Operands.size(); ++i)
4947        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
4948          CondLoc = Operands[i]->getStartLoc();
4949      return Error(CondLoc, "incorrect condition in IT block; got '" +
4950                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
4951                   "', but expected '" +
4952                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
4953    }
4954  // Check for non-'al' condition codes outside of the IT block.
4955  } else if (isThumbTwo() && MCID.isPredicable() &&
4956             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
4957             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
4958             Inst.getOpcode() != ARM::t2B)
4959    return Error(Loc, "predicated instructions must be in IT block");
4960
4961  switch (Inst.getOpcode()) {
4962  case ARM::LDRD:
4963  case ARM::LDRD_PRE:
4964  case ARM::LDRD_POST:
4965  case ARM::LDREXD: {
4966    // Rt2 must be Rt + 1.
4967    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4968    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4969    if (Rt2 != Rt + 1)
4970      return Error(Operands[3]->getStartLoc(),
4971                   "destination operands must be sequential");
4972    return false;
4973  }
4974  case ARM::STRD: {
4975    // Rt2 must be Rt + 1.
4976    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4977    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4978    if (Rt2 != Rt + 1)
4979      return Error(Operands[3]->getStartLoc(),
4980                   "source operands must be sequential");
4981    return false;
4982  }
4983  case ARM::STRD_PRE:
4984  case ARM::STRD_POST:
4985  case ARM::STREXD: {
4986    // Rt2 must be Rt + 1.
4987    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4988    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
4989    if (Rt2 != Rt + 1)
4990      return Error(Operands[3]->getStartLoc(),
4991                   "source operands must be sequential");
4992    return false;
4993  }
4994  case ARM::SBFX:
4995  case ARM::UBFX: {
4996    // width must be in range [1, 32-lsb]
4997    unsigned lsb = Inst.getOperand(2).getImm();
4998    unsigned widthm1 = Inst.getOperand(3).getImm();
4999    if (widthm1 >= 32 - lsb)
5000      return Error(Operands[5]->getStartLoc(),
5001                   "bitfield width must be in range [1,32-lsb]");
5002    return false;
5003  }
5004  case ARM::tLDMIA: {
5005    // If we're parsing Thumb2, the .w variant is available and handles
5006    // most cases that are normally illegal for a Thumb1 LDM
5007    // instruction. We'll make the transformation in processInstruction()
5008    // if necessary.
5009    //
5010    // Thumb LDM instructions use writeback iff the base register is not
5011    // in the register list.
5012    unsigned Rn = Inst.getOperand(0).getReg();
5013    bool hasWritebackToken =
5014      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5015       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5016    bool listContainsBase;
5017    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5018      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5019                   "registers must be in range r0-r7");
5020    // If we should have writeback, then there should be a '!' token.
5021    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5022      return Error(Operands[2]->getStartLoc(),
5023                   "writeback operator '!' expected");
5024    // If we should not have writeback, there must not be a '!'. This is
5025    // true even for the 32-bit wide encodings.
5026    if (listContainsBase && hasWritebackToken)
5027      return Error(Operands[3]->getStartLoc(),
5028                   "writeback operator '!' not allowed when base register "
5029                   "in register list");
5030
5031    break;
5032  }
5033  case ARM::t2LDMIA_UPD: {
5034    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5035      return Error(Operands[4]->getStartLoc(),
5036                   "writeback operator '!' not allowed when base register "
5037                   "in register list");
5038    break;
5039  }
5040  // As with ldm/stm, push and pop have hi-reg-capable versions in Thumb2,
5041  // so only issue a diagnostic for Thumb1. The instructions will be
5042  // switched to the t2 encodings in processInstruction() if necessary.
5043  case ARM::tPOP: {
5044    bool listContainsBase;
5045    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5046        !isThumbTwo())
5047      return Error(Operands[2]->getStartLoc(),
5048                   "registers must be in range r0-r7 or pc");
5049    break;
5050  }
5051  case ARM::tPUSH: {
5052    bool listContainsBase;
5053    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5054        !isThumbTwo())
5055      return Error(Operands[2]->getStartLoc(),
5056                   "registers must be in range r0-r7 or lr");
5057    break;
5058  }
5059  case ARM::tSTMIA_UPD: {
5060    bool listContainsBase;
5061    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5062      return Error(Operands[4]->getStartLoc(),
5063                   "registers must be in range r0-r7");
5064    break;
5065  }
5066  }
5067
5068  return false;
5069}
5070
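// Map the parse-only VST lane pseudo-opcodes (one per accepted data-type
// suffix, e.g. ".8"/".i8"/".u8") onto the real VST1LN/VST2LN instruction
// opcodes; the suffix only affects parsing, not the encoding that is emitted.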
5071static unsigned getRealVSTLNOpcode(unsigned Opc) {
5072  switch(Opc) {
5073  default: llvm_unreachable("unexpected opcode!");
5074  // VST1LN
5075  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
5076  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5077  case ARM::VST1LNdWB_fixed_Asm_U8:
5078    return ARM::VST1LNd8_UPD;
5079  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
5080  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
5081  case ARM::VST1LNdWB_fixed_Asm_U16:
5082    return ARM::VST1LNd16_UPD;
5083  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
5084  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5085  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
5086    return ARM::VST1LNd32_UPD;
5087  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
5088  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5089  case ARM::VST1LNdWB_register_Asm_U8:
5090    return ARM::VST1LNd8_UPD;
5091  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
5092  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
5093  case ARM::VST1LNdWB_register_Asm_U16:
5094    return ARM::VST1LNd16_UPD;
5095  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
5096  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5097  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
5098    return ARM::VST1LNd32_UPD;
5099  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
5100  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
5101  case ARM::VST1LNdAsm_U8:
5102    return ARM::VST1LNd8;
5103  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
5104  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5105  case ARM::VST1LNdAsm_U16:
5106    return ARM::VST1LNd16;
5107  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
5108  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
5109  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
5110    return ARM::VST1LNd32;
5111
5112  // VST2LN
5113  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
5114  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5115  case ARM::VST2LNdWB_fixed_Asm_U8:
5116    return ARM::VST2LNd8_UPD;
5117  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
5118  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
5119  case ARM::VST2LNdWB_fixed_Asm_U16:
5120    return ARM::VST2LNd16_UPD;
5121  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5122  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5123  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5124    return ARM::VST2LNd32_UPD;
5125  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
5126  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5127  case ARM::VST2LNdWB_register_Asm_U8:
5128    return ARM::VST2LNd8_UPD;
5129  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
5130  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
5131  case ARM::VST2LNdWB_register_Asm_U16:
5132    return ARM::VST2LNd16_UPD;
5133  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5134  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5135  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5136    return ARM::VST2LNd32_UPD;
5137  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
5138  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
5139  case ARM::VST2LNdAsm_U8:
5140    return ARM::VST2LNd8;
5141  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
5142  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5143  case ARM::VST2LNdAsm_U16:
5144    return ARM::VST2LNd16;
5145  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5146  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
5147  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
5148    return ARM::VST2LNd32;
5149  }
5150}
5151
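// Same idea as getRealVSTLNOpcode above, but for the VLD1LN/VLD2LN lane
// pseudo-opcodes.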
5152static unsigned getRealVLDLNOpcode(unsigned Opc) {
5153  switch(Opc) {
5154  default: llvm_unreachable("unexpected opcode!");
5155  // VLD1LN
5156  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5157  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5158  case ARM::VLD1LNdWB_fixed_Asm_U8:
5159    return ARM::VLD1LNd8_UPD;
5160  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5161  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5162  case ARM::VLD1LNdWB_fixed_Asm_U16:
5163    return ARM::VLD1LNd16_UPD;
5164  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5165  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5166  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5167    return ARM::VLD1LNd32_UPD;
5168  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5169  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5170  case ARM::VLD1LNdWB_register_Asm_U8:
5171    return ARM::VLD1LNd8_UPD;
5172  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5173  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5174  case ARM::VLD1LNdWB_register_Asm_U16:
5175    return ARM::VLD1LNd16_UPD;
5176  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5177  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5178  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5179    return ARM::VLD1LNd32_UPD;
5180  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5181  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5182  case ARM::VLD1LNdAsm_U8:
5183    return ARM::VLD1LNd8;
5184  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5185  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5186  case ARM::VLD1LNdAsm_U16:
5187    return ARM::VLD1LNd16;
5188  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5189  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5190  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5191    return ARM::VLD1LNd32;
5192
5193  // VLD2LN
5194  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5195  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5196  case ARM::VLD2LNdWB_fixed_Asm_U8:
5197    return ARM::VLD2LNd8_UPD;
5198  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5199  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5200  case ARM::VLD2LNdWB_fixed_Asm_U16:
5201    return ARM::VLD2LNd16_UPD;
5202  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5203  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5204  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5205    return ARM::VLD2LNd32_UPD;
5206  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5207  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5208  case ARM::VLD2LNdWB_register_Asm_U8:
5209    return ARM::VLD2LNd8_UPD;
5210  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5211  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5212  case ARM::VLD2LNdWB_register_Asm_U16:
5213    return ARM::VLD2LNd16_UPD;
5214  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5215  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5216  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5217    return ARM::VLD2LNd32_UPD;
5218  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5219  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5220  case ARM::VLD2LNdAsm_U8:
5221    return ARM::VLD2LNd8;
5222  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5223  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5224  case ARM::VLD2LNdAsm_U16:
5225    return ARM::VLD2LNd16;
5226  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5227  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5228  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5229    return ARM::VLD2LNd32;
5230  }
5231}
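// Rewrite complex assembler aliases (pseudo-instructions) into the real
// MCInst forms; returns true when Inst has been rewritten.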
5232
5233bool ARMAsmParser::
5234processInstruction(MCInst &Inst,
5235                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5236  switch (Inst.getOpcode()) {
5237  // Handle NEON VST complex aliases.
5238  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5239  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5240  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5241  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5242  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5243  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5244  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5245  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5246    MCInst TmpInst;
5247    // Shuffle the operands around so the lane index operand is in the
5248    // right place.
5249    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5250    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5251    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5252    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5253    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5254    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5255    TmpInst.addOperand(Inst.getOperand(1)); // lane
5256    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5257    TmpInst.addOperand(Inst.getOperand(6));
5258    Inst = TmpInst;
5259    return true;
5260  }
5261
5262  case ARM::VST2LNdWB_register_Asm_8: case ARM::VST2LNdWB_register_Asm_P8:
5263  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5264  case ARM::VST2LNdWB_register_Asm_U8: case ARM::VST2LNdWB_register_Asm_16:
5265  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5266  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5267  case ARM::VST2LNdWB_register_Asm_32: case ARM::VST2LNdWB_register_Asm_F:
5268  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5269  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32: {
5270    MCInst TmpInst;
5271    // Shuffle the operands around so the lane index operand is in the
5272    // right place.
5273    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5274    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5275    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5276    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5277    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5278    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5279    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5280    TmpInst.addOperand(Inst.getOperand(1)); // lane
5281    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5282    TmpInst.addOperand(Inst.getOperand(6));
5283    Inst = TmpInst;
5284    return true;
5285  }
5286  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5287  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5288  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5289  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5290  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5291  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5292  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5293  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5294    MCInst TmpInst;
5295    // Shuffle the operands around so the lane index operand is in the
5296    // right place.
5297    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5298    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5299    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5300    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5301    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5302    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5303    TmpInst.addOperand(Inst.getOperand(1)); // lane
5304    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5305    TmpInst.addOperand(Inst.getOperand(5));
5306    Inst = TmpInst;
5307    return true;
5308  }
5309
5310  case ARM::VST2LNdWB_fixed_Asm_8: case ARM::VST2LNdWB_fixed_Asm_P8:
5311  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5312  case ARM::VST2LNdWB_fixed_Asm_U8: case ARM::VST2LNdWB_fixed_Asm_16:
5313  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5314  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5315  case ARM::VST2LNdWB_fixed_Asm_32: case ARM::VST2LNdWB_fixed_Asm_F:
5316  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5317  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32: {
5318    MCInst TmpInst;
5319    // Shuffle the operands around so the lane index operand is in the
5320    // right place.
5321    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5322    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5323    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5324    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5325    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5326    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5327    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5328    TmpInst.addOperand(Inst.getOperand(1)); // lane
5329    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5330    TmpInst.addOperand(Inst.getOperand(5));
5331    Inst = TmpInst;
5332    return true;
5333  }
5334  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5335  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5336  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5337  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5338  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5339  case ARM::VST1LNdAsm_U32: {
5340    MCInst TmpInst;
5341    // Shuffle the operands around so the lane index operand is in the
5342    // right place.
5343    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5344    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5345    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5346    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5347    TmpInst.addOperand(Inst.getOperand(1)); // lane
5348    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5349    TmpInst.addOperand(Inst.getOperand(5));
5350    Inst = TmpInst;
5351    return true;
5352  }
5353
5354  case ARM::VST2LNdAsm_8: case ARM::VST2LNdAsm_P8: case ARM::VST2LNdAsm_I8:
5355  case ARM::VST2LNdAsm_S8: case ARM::VST2LNdAsm_U8: case ARM::VST2LNdAsm_16:
5356  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5357  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32: case ARM::VST2LNdAsm_F:
5358  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5359  case ARM::VST2LNdAsm_U32: {
5360    MCInst TmpInst;
5361    // Shuffle the operands around so the lane index operand is in the
5362    // right place.
5363    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5364    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5365    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5366    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5367    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5368    TmpInst.addOperand(Inst.getOperand(1)); // lane
5369    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5370    TmpInst.addOperand(Inst.getOperand(5));
5371    Inst = TmpInst;
5372    return true;
5373  }
5374  // Handle NEON VLD complex aliases.
5375  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5376  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5377  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5378  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5379  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5380  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5381  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5382  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5383    MCInst TmpInst;
5384    // Shuffle the operands around so the lane index operand is in the
5385    // right place.
5386    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5387    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5388    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5389    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5390    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5391    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5392    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5393    TmpInst.addOperand(Inst.getOperand(1)); // lane
5394    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5395    TmpInst.addOperand(Inst.getOperand(6));
5396    Inst = TmpInst;
5397    return true;
5398  }
5399
5400  case ARM::VLD2LNdWB_register_Asm_8: case ARM::VLD2LNdWB_register_Asm_P8:
5401  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5402  case ARM::VLD2LNdWB_register_Asm_U8: case ARM::VLD2LNdWB_register_Asm_16:
5403  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5404  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5405  case ARM::VLD2LNdWB_register_Asm_32: case ARM::VLD2LNdWB_register_Asm_F:
5406  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5407  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32: {
5408    MCInst TmpInst;
5409    // Shuffle the operands around so the lane index operand is in the
5410    // right place.
5411    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5412    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5413    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5414    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5415    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5416    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5417    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5418    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5419    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5420    TmpInst.addOperand(Inst.getOperand(1)); // lane
5421    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5422    TmpInst.addOperand(Inst.getOperand(6));
5423    Inst = TmpInst;
5424    return true;
5425  }
5426
5427  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5428  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5429  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5430  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5431  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5432  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5433  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5434  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5435    MCInst TmpInst;
5436    // Shuffle the operands around so the lane index operand is in the
5437    // right place.
5438    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5439    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5440    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5441    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5442    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5443    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5444    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5445    TmpInst.addOperand(Inst.getOperand(1)); // lane
5446    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5447    TmpInst.addOperand(Inst.getOperand(5));
5448    Inst = TmpInst;
5449    return true;
5450  }
5451
5452  case ARM::VLD2LNdWB_fixed_Asm_8: case ARM::VLD2LNdWB_fixed_Asm_P8:
5453  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5454  case ARM::VLD2LNdWB_fixed_Asm_U8: case ARM::VLD2LNdWB_fixed_Asm_16:
5455  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5456  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5457  case ARM::VLD2LNdWB_fixed_Asm_32: case ARM::VLD2LNdWB_fixed_Asm_F:
5458  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5459  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32: {
5460    MCInst TmpInst;
5461    // Shuffle the operands around so the lane index operand is in the
5462    // right place.
5463    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5464    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5465    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5466    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5467    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5468    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5469    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5470    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5471    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5472    TmpInst.addOperand(Inst.getOperand(1)); // lane
5473    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5474    TmpInst.addOperand(Inst.getOperand(5));
5475    Inst = TmpInst;
5476    return true;
5477  }
5478
5479  case ARM::VLD1LNdAsm_8: case ARM::VLD1LNdAsm_P8: case ARM::VLD1LNdAsm_I8:
5480  case ARM::VLD1LNdAsm_S8: case ARM::VLD1LNdAsm_U8: case ARM::VLD1LNdAsm_16:
5481  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5482  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32: case ARM::VLD1LNdAsm_F:
5483  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5484  case ARM::VLD1LNdAsm_U32: {
5485    MCInst TmpInst;
5486    // Shuffle the operands around so the lane index operand is in the
5487    // right place.
5488    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5489    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5490    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5491    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5492    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5493    TmpInst.addOperand(Inst.getOperand(1)); // lane
5494    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5495    TmpInst.addOperand(Inst.getOperand(5));
5496    Inst = TmpInst;
5497    return true;
5498  }
5499
5500  case ARM::VLD2LNdAsm_8: case ARM::VLD2LNdAsm_P8: case ARM::VLD2LNdAsm_I8:
5501  case ARM::VLD2LNdAsm_S8: case ARM::VLD2LNdAsm_U8: case ARM::VLD2LNdAsm_16:
5502  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5503  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32: case ARM::VLD2LNdAsm_F:
5504  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5505  case ARM::VLD2LNdAsm_U32: {
5506    MCInst TmpInst;
5507    // Shuffle the operands around so the lane index operand is in the
5508    // right place.
5509    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5510    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5511    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5512    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5513    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5514    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5515    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5516    TmpInst.addOperand(Inst.getOperand(1)); // lane
5517    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5518    TmpInst.addOperand(Inst.getOperand(5));
5519    Inst = TmpInst;
5520    return true;
5521  }
5522  // Handle the Thumb2 mode MOV complex aliases.
5523  case ARM::t2MOVsi:
5524  case ARM::t2MOVSsi: {
5525    // Which instruction to expand to depends on the CCOut operand and,
5526    // when the register operands are low registers, on whether we're
5527    // inside an IT block.
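    // e.g. a t2MOVsi with an 'lsl #2' shifter becomes the 16-bit tLSLri when
    // the narrow conditions below hold, and t2LSLri otherwise.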
5528    bool isNarrow = false;
5529    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5530        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5531        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5532      isNarrow = true;
5533    MCInst TmpInst;
5534    unsigned newOpc;
5535    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5536    default: llvm_unreachable("unexpected opcode!");
5537    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5538    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5539    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5540    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5541    }
5542    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5543    if (Amount == 32) Amount = 0;
5544    TmpInst.setOpcode(newOpc);
5545    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5546    if (isNarrow)
5547      TmpInst.addOperand(MCOperand::CreateReg(
5548          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5549    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5550    TmpInst.addOperand(MCOperand::CreateImm(Amount));
5551    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5552    TmpInst.addOperand(Inst.getOperand(4));
5553    if (!isNarrow)
5554      TmpInst.addOperand(MCOperand::CreateReg(
5555          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5556    Inst = TmpInst;
5557    return true;
5558  }
5559  // Handle the ARM mode MOV complex aliases.
5560  case ARM::ASRr:
5561  case ARM::LSRr:
5562  case ARM::LSLr:
5563  case ARM::RORr: {
5564    ARM_AM::ShiftOpc ShiftTy;
5565    switch(Inst.getOpcode()) {
5566    default: llvm_unreachable("unexpected opcode!");
5567    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5568    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5569    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5570    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5571    }
5572    // These expand to a register-shifted MOVsr; the shifter operand encodes
5572    // only the shift type (with a zero immediate).
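    // e.g. "asr r0, r1, r2" is emitted as "mov r0, r1, asr r2".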
5573    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5574    MCInst TmpInst;
5575    TmpInst.setOpcode(ARM::MOVsr);
5576    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5577    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5578    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5579    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5580    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5581    TmpInst.addOperand(Inst.getOperand(4));
5582    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5583    Inst = TmpInst;
5584    return true;
5585  }
5586  case ARM::ASRi:
5587  case ARM::LSRi:
5588  case ARM::LSLi:
5589  case ARM::RORi: {
5590    ARM_AM::ShiftOpc ShiftTy;
5591    switch(Inst.getOpcode()) {
5592    default: llvm_unreachable("unexpected opcode!");
5593    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5594    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5595    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5596    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5597    }
5598    // A shift by zero is a plain MOVr, not a MOVsi.
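    // e.g. "lsr r0, r1, #4" becomes a MOVsi with an 'lsr #4' shifter operand,
    // while "lsl r0, r1, #0" collapses to a plain "mov r0, r1".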
5599    unsigned Amt = Inst.getOperand(2).getImm();
5600    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5601    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5602    MCInst TmpInst;
5603    TmpInst.setOpcode(Opc);
5604    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5605    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5606    if (Opc == ARM::MOVsi)
5607      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5608    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5609    TmpInst.addOperand(Inst.getOperand(4));
5610    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5611    Inst = TmpInst;
5612    return true;
5613  }
5614  case ARM::RRXi: {
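    // "rrx Rd, Rm" is an alias for "mov Rd, Rm, rrx", so build the MOVsi form
    // with an rrx shifter operand.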
5615    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5616    MCInst TmpInst;
5617    TmpInst.setOpcode(ARM::MOVsi);
5618    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5619    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5620    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5621    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5622    TmpInst.addOperand(Inst.getOperand(3));
5623    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5624    Inst = TmpInst;
5625    return true;
5626  }
5627  case ARM::t2LDMIA_UPD: {
5628    // If this is a load of a single register, then we should use
5629    // a post-indexed LDR instruction instead, per the ARM ARM.
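    // e.g. a single-register "ldmia r8!, {r1}" is encoded as "ldr r1, [r8], #4".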
5630    if (Inst.getNumOperands() != 5)
5631      return false;
5632    MCInst TmpInst;
5633    TmpInst.setOpcode(ARM::t2LDR_POST);
5634    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5635    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5636    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5637    TmpInst.addOperand(MCOperand::CreateImm(4));
5638    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5639    TmpInst.addOperand(Inst.getOperand(3));
5640    Inst = TmpInst;
5641    return true;
5642  }
5643  case ARM::t2STMDB_UPD: {
5644    // If this is a store of a single register, then we should use
5645    // a pre-indexed STR instruction instead, per the ARM ARM.
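    // e.g. a single-register "stmdb r8!, {r1}" is encoded as "str r1, [r8, #-4]!".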
5646    if (Inst.getNumOperands() != 5)
5647      return false;
5648    MCInst TmpInst;
5649    TmpInst.setOpcode(ARM::t2STR_PRE);
5650    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5651    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5652    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5653    TmpInst.addOperand(MCOperand::CreateImm(-4));
5654    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5655    TmpInst.addOperand(Inst.getOperand(3));
5656    Inst = TmpInst;
5657    return true;
5658  }
5659  case ARM::LDMIA_UPD:
5660    // If this is a load of a single register via a 'pop', then we should use
5661    // a post-indexed LDR instruction instead, per the ARM ARM.
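    // e.g. "pop {r3}" is encoded as "ldr r3, [sp], #4".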
5662    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5663        Inst.getNumOperands() == 5) {
5664      MCInst TmpInst;
5665      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5666      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5667      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5668      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5669      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5670      TmpInst.addOperand(MCOperand::CreateImm(4));
5671      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5672      TmpInst.addOperand(Inst.getOperand(3));
5673      Inst = TmpInst;
5674      return true;
5675    }
5676    break;
5677  case ARM::STMDB_UPD:
5678    // If this is a store of a single register via a 'push', then we should use
5679    // a pre-indexed STR instruction instead, per the ARM ARM.
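    // e.g. "push {r3}" is encoded as "str r3, [sp, #-4]!".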
5680    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5681        Inst.getNumOperands() == 5) {
5682      MCInst TmpInst;
5683      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5684      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5685      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5686      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5687      TmpInst.addOperand(MCOperand::CreateImm(-4));
5688      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5689      TmpInst.addOperand(Inst.getOperand(3));
5690      Inst = TmpInst;
5691    }
5692    break;
5693  case ARM::t2ADDri12:
5694    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5695    // mnemonic was used (not "addw"), encoding T3 is preferred.
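    // e.g. "add r0, r1, #1" uses encoding T3 (t2ADDri), while "addw r0, r1, #1"
    // keeps the 12-bit-immediate encoding.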
5696    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5697        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5698      break;
5699    Inst.setOpcode(ARM::t2ADDri);
5700    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5701    break;
5702  case ARM::t2SUBri12:
5703    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5704    // mnemonic was used (not "subw"), encoding T3 is preferred.
5705    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5706        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5707      break;
5708    Inst.setOpcode(ARM::t2SUBri);
5709    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5710    break;
5711  case ARM::tADDi8:
5712    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5713    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5714    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5715    // to encoding T1 if <Rd> is omitted."
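    // e.g. "adds r1, r1, #3" (Rd written explicitly) prefers tADDi3, while
    // "adds r1, #3" stays as tADDi8.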
5716    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5717      Inst.setOpcode(ARM::tADDi3);
5718      return true;
5719    }
5720    break;
5721  case ARM::tSUBi8:
5722    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5723    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5724    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5725    // to encoding T1 if <Rd> is omitted."
5726    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5727      Inst.setOpcode(ARM::tSUBi3);
5728      return true;
5729    }
5730    break;
5731  case ARM::t2ADDrr: {
5732    // If the destination and first source operand are the same, and
5733    // there's no setting of the flags, use encoding T2 instead of T3.
5734    // Note that this is only for ADD, not SUB. This mirrors the system
5735    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
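    // e.g. "add r1, r1, r3" becomes tADDhirr; an explicit ".w" or a
    // flag-setting form stays as t2ADDrr.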
5736    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5737        Inst.getOperand(5).getReg() != 0 ||
5738        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5739         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5740      break;
5741    MCInst TmpInst;
5742    TmpInst.setOpcode(ARM::tADDhirr);
5743    TmpInst.addOperand(Inst.getOperand(0));
5744    TmpInst.addOperand(Inst.getOperand(0));
5745    TmpInst.addOperand(Inst.getOperand(2));
5746    TmpInst.addOperand(Inst.getOperand(3));
5747    TmpInst.addOperand(Inst.getOperand(4));
5748    Inst = TmpInst;
5749    return true;
5750  }
5751  case ARM::tB:
5752    // A Thumb conditional branch outside of an IT block is a tBcc.
5753    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5754      Inst.setOpcode(ARM::tBcc);
5755      return true;
5756    }
5757    break;
5758  case ARM::t2B:
5759    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5760    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
5761      Inst.setOpcode(ARM::t2Bcc);
5762      return true;
5763    }
5764    break;
5765  case ARM::t2Bcc:
5766    // If the conditional is AL or we're in an IT block, we really want t2B.
5767    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5768      Inst.setOpcode(ARM::t2B);
5769      return true;
5770    }
5771    break;
5772  case ARM::tBcc:
5773    // If the conditional is AL, we really want tB.
5774    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5775      Inst.setOpcode(ARM::tB);
5776      return true;
5777    }
5778    break;
5779  case ARM::tLDMIA: {
5780    // If the register list contains any high registers, or if the writeback
5781    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
5782    // instead if we're in Thumb2. Otherwise, this should have generated
5783    // an error in validateInstruction().
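    // e.g. "ldmia r0, {r8}" (high register, no writeback) must use the
    // 32-bit t2LDMIA encoding.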
5784    unsigned Rn = Inst.getOperand(0).getReg();
5785    bool hasWritebackToken =
5786      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5787       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5788    bool listContainsBase;
5789    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
5790        (!listContainsBase && !hasWritebackToken) ||
5791        (listContainsBase && hasWritebackToken)) {
5792      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5793      assert (isThumbTwo());
5794      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
5795      // If we're switching to the updating version, we need to insert
5796      // the writeback tied operand.
5797      if (hasWritebackToken)
5798        Inst.insert(Inst.begin(),
5799                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
5800      return true;
5801    }
5802    break;
5803  }
5804  case ARM::tSTMIA_UPD: {
5805    // If the register list contains any high registers, we need to use
5806    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5807    // should have generated an error in validateInstruction().
5808    unsigned Rn = Inst.getOperand(0).getReg();
5809    bool listContainsBase;
5810    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
5811      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5812      assert (isThumbTwo());
5813      Inst.setOpcode(ARM::t2STMIA_UPD);
5814      return true;
5815    }
5816    break;
5817  }
5818  case ARM::tPOP: {
5819    bool listContainsBase;
5820    // If the register list contains any high registers, we need to use
5821    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5822    // should have generated an error in validateInstruction().
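    // e.g. "pop {r1, r8}" must widen to the 32-bit t2LDMIA_UPD form.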
5823    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
5824      return false;
5825    assert (isThumbTwo());
5826    Inst.setOpcode(ARM::t2LDMIA_UPD);
5827    // Add the base register and writeback operands.
5828    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5829    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5830    return true;
5831  }
5832  case ARM::tPUSH: {
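    // As with tPOP: a push of any high register other than LR must widen to
    // the 32-bit t2STMDB_UPD form.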
5833    bool listContainsBase;
5834    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
5835      return false;
5836    assert (isThumbTwo());
5837    Inst.setOpcode(ARM::t2STMDB_UPD);
5838    // Add the base register and writeback operands.
5839    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5840    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5841    return true;
5842  }
5843  case ARM::t2MOVi: {
5844    // If we can use the 16-bit encoding and the user didn't explicitly
5845    // request the 32-bit variant, transform it here.
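    // e.g. "movs r0, #42" outside an IT block, or "mov r0, #42" inside one,
    // can use the 16-bit tMOVi8 encoding.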
5846    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5847        Inst.getOperand(1).getImm() <= 255 &&
5848        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
5849         Inst.getOperand(4).getReg() == ARM::CPSR) ||
5850        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
5851        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5852         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5853      // The operands aren't in the same order for tMOVi8...
5854      MCInst TmpInst;
5855      TmpInst.setOpcode(ARM::tMOVi8);
5856      TmpInst.addOperand(Inst.getOperand(0));
5857      TmpInst.addOperand(Inst.getOperand(4));
5858      TmpInst.addOperand(Inst.getOperand(1));
5859      TmpInst.addOperand(Inst.getOperand(2));
5860      TmpInst.addOperand(Inst.getOperand(3));
5861      Inst = TmpInst;
5862      return true;
5863    }
5864    break;
5865  }
5866  case ARM::t2MOVr: {
5867    // If we can use the 16-bit encoding and the user didn't explicitly
5868    // request the 32-bit variant, transform it here.
5869    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5870        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5871        Inst.getOperand(2).getImm() == ARMCC::AL &&
5872        Inst.getOperand(4).getReg() == ARM::CPSR &&
5873        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5874         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5875      // The operands aren't the same for tMOV[S]r... (no cc_out)
5876      MCInst TmpInst;
5877      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
5878      TmpInst.addOperand(Inst.getOperand(0));
5879      TmpInst.addOperand(Inst.getOperand(1));
5880      TmpInst.addOperand(Inst.getOperand(2));
5881      TmpInst.addOperand(Inst.getOperand(3));
5882      Inst = TmpInst;
5883      return true;
5884    }
5885    break;
5886  }
5887  case ARM::t2SXTH:
5888  case ARM::t2SXTB:
5889  case ARM::t2UXTH:
5890  case ARM::t2UXTB: {
5891    // If we can use the 16-bit encoding and the user didn't explicitly
5892    // request the 32-bit variant, transform it here.
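    // e.g. "uxtb r0, r1" (no rotation) can use the 16-bit tUXTB encoding.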
5893    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5894        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5895        Inst.getOperand(2).getImm() == 0 &&
5896        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5897         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5898      unsigned NewOpc;
5899      switch (Inst.getOpcode()) {
5900      default: llvm_unreachable("Illegal opcode!");
5901      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
5902      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
5903      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
5904      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
5905      }
5906      // The operands aren't the same for thumb1 (no rotate operand).
5907      MCInst TmpInst;
5908      TmpInst.setOpcode(NewOpc);
5909      TmpInst.addOperand(Inst.getOperand(0));
5910      TmpInst.addOperand(Inst.getOperand(1));
5911      TmpInst.addOperand(Inst.getOperand(3));
5912      TmpInst.addOperand(Inst.getOperand(4));
5913      Inst = TmpInst;
5914      return true;
5915    }
5916    break;
5917  }
5918  case ARM::t2IT: {
5919    // The mask bits for all but the first condition are defined relative to
5920    // the low bit of the condition code: a '1' bit means 't' (the condition
5921    // as written). The parser always builds the mask with '1' meaning 't',
5922    // so XOR-toggle the bits if the low bit of the condition code is zero.
5923    // The encoding also expects the low bit of the condition to appear as
5924    // bit 4 of the mask operand, so mask that in if needed.
5925    MCOperand &MO = Inst.getOperand(1);
5926    unsigned Mask = MO.getImm();
5927    unsigned OrigMask = Mask;
5928    unsigned TZ = CountTrailingZeros_32(Mask);
5929    if ((Inst.getOperand(0).getImm() & 1) == 0) {
5930      assert(Mask && TZ <= 3 && "illegal IT mask value!");
5931      for (unsigned i = 3; i != TZ; --i)
5932        Mask ^= 1 << i;
5933    } else
5934      Mask |= 0x10;
5935    MO.setImm(Mask);
5936
5937    // Set up the IT block state according to the IT instruction we just
5938    // matched.
5939    assert(!inITBlock() && "nested IT blocks?!");
5940    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
5941    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
5942    ITState.CurPosition = 0;
5943    ITState.FirstCond = true;
5944    break;
5945  }
5946  }
5947  return false;
5948}
5949
5950unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
5951  // 16-bit Thumb arithmetic instructions either require or preclude the 'S'
5952  // suffix depending on whether they're in an IT block.
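  // e.g. the 16-bit register-form ADD must be "adds" outside an IT block and
  // plain "add" inside one.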
5953  unsigned Opc = Inst.getOpcode();
5954  const MCInstrDesc &MCID = getInstDesc(Opc);
5955  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
5956    assert(MCID.hasOptionalDef() &&
5957           "optionally flag setting instruction missing optional def operand");
5958    assert(MCID.NumOperands == Inst.getNumOperands() &&
5959           "operand count mismatch!");
5960    // Find the optional-def operand (cc_out).
5961    unsigned OpNo;
5962    for (OpNo = 0;
5963         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
5964         ++OpNo)
5965      ;
5966    // If we're parsing Thumb1, reject it completely.
5967    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
5968      return Match_MnemonicFail;
5969    // If we're parsing Thumb2, which form is legal depends on whether we're
5970    // in an IT block.
5971    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
5972        !inITBlock())
5973      return Match_RequiresITBlock;
5974    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
5975        inITBlock())
5976      return Match_RequiresNotITBlock;
5977  }
5978  // Some high-register supporting Thumb1 encodings only allow both registers
5979  // to be from r0-r7 when in Thumb2.
5980  else if (Opc == ARM::tADDhirr && isThumbOne() &&
5981           isARMLowRegister(Inst.getOperand(1).getReg()) &&
5982           isARMLowRegister(Inst.getOperand(2).getReg()))
5983    return Match_RequiresThumb2;
5984  // Others only require ARMv6 or later.
5985  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
5986           isARMLowRegister(Inst.getOperand(0).getReg()) &&
5987           isARMLowRegister(Inst.getOperand(1).getReg()))
5988    return Match_RequiresV6;
5989  return Match_Success;
5990}
5991
5992bool ARMAsmParser::
5993MatchAndEmitInstruction(SMLoc IDLoc,
5994                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
5995                        MCStreamer &Out) {
5996  MCInst Inst;
5997  unsigned ErrorInfo;
5998  unsigned MatchResult;
5999  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
6000  switch (MatchResult) {
6001  default: break;
6002  case Match_Success:
6003    // Context sensitive operand constraints aren't handled by the matcher,
6004    // so check them here.
6005    if (validateInstruction(Inst, Operands)) {
6006      // Still progress the IT block, otherwise one wrong condition causes
6007      // nasty cascading errors.
6008      forwardITPosition();
6009      return true;
6010    }
6011
6012    // Some instructions need post-processing to, for example, tweak which
6013    // encoding is selected. Loop on it while changes happen so the
6014    // individual transformations can chain off each other. E.g.,
6015    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
6016    while (processInstruction(Inst, Operands))
6017      ;
6018
6019    // Only move forward at the very end so that everything in validate
6020    // and process gets a consistent answer about whether we're in an IT
6021    // block.
6022    forwardITPosition();
6023
6024    Out.EmitInstruction(Inst);
6025    return false;
6026  case Match_MissingFeature:
6027    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
6028    return true;
6029  case Match_InvalidOperand: {
6030    SMLoc ErrorLoc = IDLoc;
6031    if (ErrorInfo != ~0U) {
6032      if (ErrorInfo >= Operands.size())
6033        return Error(IDLoc, "too few operands for instruction");
6034
6035      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
6036      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6037    }
6038
6039    return Error(ErrorLoc, "invalid operand for instruction");
6040  }
6041  case Match_MnemonicFail:
6042    return Error(IDLoc, "invalid instruction");
6043  case Match_ConversionFail:
6044    // The converter function will have already emitted a diagnostic.
6045    return true;
6046  case Match_RequiresNotITBlock:
6047    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6048  case Match_RequiresITBlock:
6049    return Error(IDLoc, "instruction only valid inside IT block");
6050  case Match_RequiresV6:
6051    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6052  case Match_RequiresThumb2:
6053    return Error(IDLoc, "instruction variant requires Thumb2");
6054  }
6055
6056  llvm_unreachable("Implement any new match types added!");
6057  return true;
6058}
6059
6060/// ParseDirective parses the ARM-specific directives.
6061bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6062  StringRef IDVal = DirectiveID.getIdentifier();
6063  if (IDVal == ".word")
6064    return parseDirectiveWord(4, DirectiveID.getLoc());
6065  else if (IDVal == ".thumb")
6066    return parseDirectiveThumb(DirectiveID.getLoc());
6067  else if (IDVal == ".arm")
6068    return parseDirectiveARM(DirectiveID.getLoc());
6069  else if (IDVal == ".thumb_func")
6070    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6071  else if (IDVal == ".code")
6072    return parseDirectiveCode(DirectiveID.getLoc());
6073  else if (IDVal == ".syntax")
6074    return parseDirectiveSyntax(DirectiveID.getLoc());
6075  else if (IDVal == ".unreq")
6076    return parseDirectiveUnreq(DirectiveID.getLoc());
6077  return true;
6078}
6079
6080/// parseDirectiveWord
6081///  ::= .word [ expression (, expression)* ]
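///  e.g. ".word 0x11223344, sym+4" emits two 4-byte values.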
6082bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6083  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6084    for (;;) {
6085      const MCExpr *Value;
6086      if (getParser().ParseExpression(Value))
6087        return true;
6088
6089      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6090
6091      if (getLexer().is(AsmToken::EndOfStatement))
6092        break;
6093
6094      // FIXME: Improve diagnostic.
6095      if (getLexer().isNot(AsmToken::Comma))
6096        return Error(L, "unexpected token in directive");
6097      Parser.Lex();
6098    }
6099  }
6100
6101  Parser.Lex();
6102  return false;
6103}
6104
6105/// parseDirectiveThumb
6106///  ::= .thumb
6107bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6108  if (getLexer().isNot(AsmToken::EndOfStatement))
6109    return Error(L, "unexpected token in directive");
6110  Parser.Lex();
6111
6112  if (!isThumb())
6113    SwitchMode();
6114  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6115  return false;
6116}
6117
6118/// parseDirectiveARM
6119///  ::= .arm
6120bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6121  if (getLexer().isNot(AsmToken::EndOfStatement))
6122    return Error(L, "unexpected token in directive");
6123  Parser.Lex();
6124
6125  if (isThumb())
6126    SwitchMode();
6127  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6128  return false;
6129}
6130
6131/// parseDirectiveThumbFunc
6132///  ::= .thumb_func symbol_name
6133bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6134  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6135  bool isMachO = MAI.hasSubsectionsViaSymbols();
6136  StringRef Name;
6137
6138  // Darwin asm has the function name after the .thumb_func directive;
6139  // ELF doesn't.
6140  if (isMachO) {
6141    const AsmToken &Tok = Parser.getTok();
6142    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6143      return Error(L, "unexpected token in .thumb_func directive");
6144    Name = Tok.getIdentifier();
6145    Parser.Lex(); // Consume the identifier token.
6146  }
6147
6148  if (getLexer().isNot(AsmToken::EndOfStatement))
6149    return Error(L, "unexpected token in directive");
6150  Parser.Lex();
6151
6152  // FIXME: assuming function name will be the line following .thumb_func
6153  if (!isMachO) {
6154    Name = Parser.getTok().getIdentifier();
6155  }
6156
6157  // Mark symbol as a thumb symbol.
6158  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6159  getParser().getStreamer().EmitThumbFunc(Func);
6160  return false;
6161}
6162
6163/// parseDirectiveSyntax
6164///  ::= .syntax unified | divided
6165bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6166  const AsmToken &Tok = Parser.getTok();
6167  if (Tok.isNot(AsmToken::Identifier))
6168    return Error(L, "unexpected token in .syntax directive");
6169  StringRef Mode = Tok.getString();
6170  if (Mode == "unified" || Mode == "UNIFIED")
6171    Parser.Lex();
6172  else if (Mode == "divided" || Mode == "DIVIDED")
6173    return Error(L, "'.syntax divided' arm asssembly not supported");
6174  else
6175    return Error(L, "unrecognized syntax mode in .syntax directive");
6176
6177  if (getLexer().isNot(AsmToken::EndOfStatement))
6178    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6179  Parser.Lex();
6180
6181  // TODO tell the MC streamer the mode
6182  // getParser().getStreamer().Emit???();
6183  return false;
6184}
6185
6186/// parseDirectiveCode
6187///  ::= .code 16 | 32
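///  ".code 16" switches the assembler to Thumb; ".code 32" switches to ARM.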
6188bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6189  const AsmToken &Tok = Parser.getTok();
6190  if (Tok.isNot(AsmToken::Integer))
6191    return Error(L, "unexpected token in .code directive");
6192  int64_t Val = Parser.getTok().getIntVal();
6193  if (Val == 16)
6194    Parser.Lex();
6195  else if (Val == 32)
6196    Parser.Lex();
6197  else
6198    return Error(L, "invalid operand to .code directive");
6199
6200  if (getLexer().isNot(AsmToken::EndOfStatement))
6201    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6202  Parser.Lex();
6203
6204  if (Val == 16) {
6205    if (!isThumb())
6206      SwitchMode();
6207    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6208  } else {
6209    if (isThumb())
6210      SwitchMode();
6211    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6212  }
6213
6214  return false;
6215}
6216
6217/// parseDirectiveReq
6218///  ::= name .req registername
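///  e.g. "fp .req r11" makes "fp" usable wherever r11 is.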
6219bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6220  Parser.Lex(); // Eat the '.req' token.
6221  unsigned Reg;
6222  SMLoc SRegLoc, ERegLoc;
6223  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6224    Parser.EatToEndOfStatement();
6225    return Error(SRegLoc, "register name expected");
6226  }
6227
6228  // Shouldn't be anything else.
6229  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6230    Parser.EatToEndOfStatement();
6231    return Error(Parser.getTok().getLoc(),
6232                 "unexpected input in .req directive.");
6233  }
6234
6235  Parser.Lex(); // Consume the EndOfStatement
6236
6237  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6238    return Error(SRegLoc, "redefinition of '" + Name +
6239                          "' does not match original.");
6240
6241  return false;
6242}
6243
6244/// parseDirectiveUnreq
6245///  ::= .unreq registername
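///  e.g. ".unreq fp" removes a previously defined ".req" alias.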
6246bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6247  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6248    Parser.EatToEndOfStatement();
6249    return Error(L, "unexpected input in .unreq directive.");
6250  }
6251  RegisterReqs.erase(Parser.getTok().getIdentifier());
6252  Parser.Lex(); // Eat the identifier.
6253  return false;
6254}
6255
6256extern "C" void LLVMInitializeARMAsmLexer();
6257
6258/// Force static initialization.
6259extern "C" void LLVMInitializeARMAsmParser() {
6260  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6261  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6262  LLVMInitializeARMAsmLexer();
6263}
6264
6265#define GET_REGISTER_MATCHER
6266#define GET_MATCHER_IMPLEMENTATION
6267#include "ARMGenAsmMatcher.inc"
6268