// ARMAsmParser.cpp, revision 04b5d93250bef585631a583a85f6733b1bdc8c52
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

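// How a parsed vector list spells its lanes: no lane syntax at all, the
// "all lanes" form (e.g. "d0[]"), or a specific indexed lane (e.g. "d0[1]").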
enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };

class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;
  MCAsmParser &Parser;

  // Map of register alias names to registers, set up via the .req directive.
  StringMap<unsigned> RegisterReqs;

  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;
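  // For example, a plain "IT" covering a single predicated instruction parses
  // to Mask == 0b1000: trailingzeroes(Mask) == 3, so the block length is
  // 4 - 3 == 1.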
  bool inITBlock() { return ITState.CurPosition != ~0U; }
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }


  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveARM(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);
  bool parseDirectiveReq(StringRef Name, SMLoc L);
  bool parseDirectiveUnreq(SMLoc L);

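  // Split a full mnemonic into the base mnemonic plus any predication code,
  // carry-setting 'S' flag, processor IMod suffix, and IT block mask it
  // carries.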
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
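  // Toggle between ARM and Thumb mode and recompute the feature bits that are
  // available to the matcher.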
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_VectorListAllLanes,
    k_VectorListIndexed,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  SMLoc StartLoc, EndLoc;
  SmallVector<unsigned, 8> Registers;

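  // Kind selects which of the following union members (if any) is in use.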
  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
    struct {
      unsigned RegNum;
      unsigned Count;
      unsigned LaneIndex;
      bool isDoubleSpaced;
    } VectorList;

    struct {
      unsigned Val;
    } VectorIndex;

    struct {
      const MCExpr *Val;
    } Imm;

    struct {
      unsigned Val;       // encoded 8-bit representation
    } FPImm;

    /// Combined record for all forms of ARM address expressions.
    struct {
      unsigned BaseRegNum;
      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
      // was specified.
      const MCConstantExpr *OffsetImm;  // Offset immediate value
      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
      unsigned ShiftImm;        // shift for OffsetReg.
      unsigned Alignment;       // 0 = no alignment specified
                                // n = alignment in bytes (2, 4, 8, 16, or 32)
      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
    } Memory;

    struct {
      unsigned RegNum;
      bool isAdd;
      ARM_AM::ShiftOpc ShiftTy;
      unsigned ShiftImm;
    } PostIdxReg;

    struct {
      bool isASR;
      unsigned Imm;
    } ShifterImm;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftReg;
      unsigned ShiftImm;
    } RegShiftedReg;
    struct {
      ARM_AM::ShiftOpc ShiftTy;
      unsigned SrcReg;
      unsigned ShiftImm;
    } RegShiftedImm;
    struct {
      unsigned Imm;
    } RotImm;
    struct {
      unsigned LSB;
      unsigned Width;
    } Bitfield;
  };

  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
public:
  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
    Kind = o.Kind;
    StartLoc = o.StartLoc;
    EndLoc = o.EndLoc;
    switch (Kind) {
    case k_CondCode:
      CC = o.CC;
      break;
    case k_ITCondMask:
      ITMask = o.ITMask;
      break;
    case k_Token:
      Tok = o.Tok;
      break;
    case k_CCOut:
    case k_Register:
      Reg = o.Reg;
      break;
    case k_RegisterList:
    case k_DPRRegisterList:
    case k_SPRRegisterList:
      Registers = o.Registers;
      break;
    case k_VectorList:
    case k_VectorListAllLanes:
    case k_VectorListIndexed:
      VectorList = o.VectorList;
      break;
    case k_CoprocNum:
    case k_CoprocReg:
      Cop = o.Cop;
      break;
    case k_CoprocOption:
      CoprocOption = o.CoprocOption;
      break;
    case k_Immediate:
      Imm = o.Imm;
      break;
    case k_FPImmediate:
      FPImm = o.FPImm;
      break;
    case k_MemBarrierOpt:
      MBOpt = o.MBOpt;
      break;
    case k_Memory:
      Memory = o.Memory;
      break;
    case k_PostIndexRegister:
      PostIdxReg = o.PostIdxReg;
      break;
    case k_MSRMask:
      MMask = o.MMask;
      break;
    case k_ProcIFlags:
      IFlags = o.IFlags;
      break;
    case k_ShifterImmediate:
      ShifterImm = o.ShifterImm;
      break;
    case k_ShiftedRegister:
      RegShiftedReg = o.RegShiftedReg;
      break;
    case k_ShiftedImmediate:
      RegShiftedImm = o.RegShiftedImm;
      break;
    case k_RotateImmediate:
      RotImm = o.RotImm;
      break;
    case k_BitfieldDescriptor:
      Bitfield = o.Bitfield;
      break;
    case k_VectorIndex:
      VectorIndex = o.VectorIndex;
      break;
    }
  }

  /// getStartLoc - Get the location of the first token of this operand.
  SMLoc getStartLoc() const { return StartLoc; }
  /// getEndLoc - Get the location of the last token of this operand.
  SMLoc getEndLoc() const { return EndLoc; }

  ARMCC::CondCodes getCondCode() const {
    assert(Kind == k_CondCode && "Invalid access!");
    return CC.Val;
  }

  unsigned getCoproc() const {
    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
    return Cop.Val;
  }

  StringRef getToken() const {
    assert(Kind == k_Token && "Invalid access!");
    return StringRef(Tok.Data, Tok.Length);
  }

  unsigned getReg() const {
    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
    return Reg.RegNum;
  }

  const SmallVectorImpl<unsigned> &getRegList() const {
    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
            Kind == k_SPRRegisterList) && "Invalid access!");
    return Registers;
  }

  const MCExpr *getImm() const {
    assert(Kind == k_Immediate && "Invalid access!");
    return Imm.Val;
  }

  unsigned getFPImm() const {
    assert(Kind == k_FPImmediate && "Invalid access!");
    return FPImm.Val;
  }

  unsigned getVectorIndex() const {
    assert(Kind == k_VectorIndex && "Invalid access!");
    return VectorIndex.Val;
  }

  ARM_MB::MemBOpt getMemBarrierOpt() const {
    assert(Kind == k_MemBarrierOpt && "Invalid access!");
    return MBOpt.Val;
  }

  ARM_PROC::IFlags getProcIFlags() const {
    assert(Kind == k_ProcIFlags && "Invalid access!");
    return IFlags.Val;
  }

  unsigned getMSRMask() const {
    assert(Kind == k_MSRMask && "Invalid access!");
    return MMask.Val;
  }

  bool isCoprocNum() const { return Kind == k_CoprocNum; }
  bool isCoprocReg() const { return Kind == k_CoprocReg; }
  bool isCoprocOption() const { return Kind == k_CoprocOption; }
  bool isCondCode() const { return Kind == k_CondCode; }
  bool isCCOut() const { return Kind == k_CCOut; }
  bool isITMask() const { return Kind == k_ITCondMask; }
  bool isITCondCode() const { return Kind == k_CondCode; }
  bool isImm() const { return Kind == k_Immediate; }
  bool isFPImm() const { return Kind == k_FPImmediate; }
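  // The isImmN_M / isImmNsK predicates below check that a constant immediate
  // lies in the range (and has the alignment) a particular instruction form
  // requires; the tablegen'erated matcher dispatches on them by name.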
  bool isImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
  }
  bool isImm0_1020s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
  }
  bool isImm0_508s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
  }
  bool isImm0_255() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 256;
  }
  bool isImm0_1() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 2;
  }
  bool isImm0_3() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 4;
  }
  bool isImm0_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 8;
  }
  bool isImm0_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 16;
  }
  bool isImm0_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isImm0_63() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 64;
  }
  bool isImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 8;
  }
  bool isImm16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 16;
  }
  bool isImm32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 32;
  }
  bool isShrImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 8;
  }
  bool isShrImm16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 16;
  }
  bool isShrImm32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
  bool isShrImm64() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 64;
  }
  bool isImm1_7() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 8;
  }
  bool isImm1_15() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 16;
  }
  bool isImm1_31() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 32;
  }
  bool isImm1_16() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 17;
  }
  bool isImm1_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isImm0_32() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 33;
  }
  bool isImm0_65535() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm0_65535Expr() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // If it's not a constant expression, it'll generate a fixup and be
    // handled later.
    if (!CE) return true;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 65536;
  }
  bool isImm24bit() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value <= 0xffffff;
  }
  bool isImmThumbSR() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value < 33;
  }
  bool isPKHLSLImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value >= 0 && Value < 32;
  }
  bool isPKHASRImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value > 0 && Value <= 32;
  }
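  // so_imm: an ARM modified immediate, i.e. an 8-bit value rotated right by
  // an even amount; ARM_AM::getSOImmVal() returns -1 when a value has no such
  // encoding.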
  bool isARMSOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(Value) != -1;
  }
  bool isARMSOImmNot() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(~Value) != -1;
  }
  bool isARMSOImmNeg() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getSOImmVal(-Value) != -1;
  }
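  // t2_so_imm: a Thumb2 modified immediate; ARM_AM::getT2SOImmVal() returns
  // -1 when a value cannot be encoded that way.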
  bool isT2SOImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(Value) != -1;
  }
  bool isT2SOImmNot() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(~Value) != -1;
  }
  bool isT2SOImmNeg() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return ARM_AM::getT2SOImmVal(-Value) != -1;
  }
  bool isSetEndImm() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Value = CE->getValue();
    return Value == 1 || Value == 0;
  }
  bool isReg() const { return Kind == k_Register; }
  bool isRegList() const { return Kind == k_RegisterList; }
  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
  bool isToken() const { return Kind == k_Token; }
  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
  bool isMemory() const { return Kind == k_Memory; }
  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
  bool isRotImm() const { return Kind == k_RotateImmediate; }
  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
  bool isPostIdxReg() const {
    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
  }
  bool isMemNoOffset(bool alignOK = false) const {
    if (!isMemory())
      return false;
    // No offset of any kind.
    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
     (alignOK || Memory.Alignment == 0);
  }
  bool isAlignedMemory() const {
    return isMemNoOffset(true);
  }
  bool isAddrMode2() const {
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAM2OffsetImm() const {
    if (Kind != k_Immediate)
      return false;
    // Immediate offset in range [-4095, 4095].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return Val > -4096 && Val < 4096;
  }
  bool isAddrMode3() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // No shifts are legal for AM3.
    if (Memory.ShiftType != ARM_AM::no_shift) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return true;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val > -256 && Val < 256;
  }
  bool isAM3Offset() const {
    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
      return false;
    if (Kind == k_PostIndexRegister)
      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
    // Immediate offset in range [-255, 255].
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    // Special case, #-0 is INT32_MIN.
    return (Val > -256 && Val < 256) || Val == INT32_MIN;
  }
  bool isAddrMode5() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.Alignment != 0) return false;
    // Check for register offset.
    if (Memory.OffsetRegNum) return false;
    // Immediate offset in range [-1020, 1020] and a multiple of 4.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
      Val == INT32_MIN;
  }
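  // TBB expects a plain [Rn, Rm] register offset; TBH additionally requires
  // the offset register to be shifted left by one (lsl #1) to index halfwords.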
  bool isMemTBB() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemTBH() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
        Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isMemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
      return false;
    return true;
  }
  bool isT2MemRegOffset() const {
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.Alignment != 0)
      return false;
    // Only lsl #{0, 1, 2, 3} allowed.
    if (Memory.ShiftType == ARM_AM::no_shift)
      return true;
    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
      return false;
    return true;
  }
  bool isMemThumbRR() const {
    // Thumb reg+reg addressing is simple. Just two registers, a base and
    // an offset. No shifts, negations or any other complicating factors.
    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
      return false;
    return isARMLowRegister(Memory.BaseRegNum) &&
      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
  }
  bool isMemThumbRIs4() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 124].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
  }
  bool isMemThumbRIs2() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 2 in range [0, 62].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
  }
  bool isMemThumbRIs1() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 31].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 31;
  }
  bool isMemThumbSPI() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 ||
        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
      return false;
    // Immediate offset, multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
  }
  bool isMemImm8s4Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [-1020, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm0_1020s4Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset a multiple of 4 in range [0, 1020].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
  }
  bool isMemImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
  }
  bool isMemPosImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 255].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return Val >= 0 && Val < 256;
  }
  bool isMemNegImm8Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-255, -1].
    if (!Memory.OffsetImm) return false;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
  }
  bool isMemUImm12Offset() const {
    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [0, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val >= 0 && Val < 4096);
  }
  bool isMemImm12Offset() const {
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
      return true;

    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
      return false;
    // Immediate offset in range [-4095, 4095].
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  bool isPostIdxImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.
  bool isSingleSpacedVectorList() const {
    return Kind == k_VectorList && !VectorList.isDoubleSpaced;
  }
  bool isDoubleSpacedVectorList() const {
    return Kind == k_VectorList && VectorList.isDoubleSpaced;
  }
  bool isVecListOneD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (!isSingleSpacedVectorList()) return false;
    return VectorList.Count == 4;
  }

  bool isVecListTwoQ() const {
    if (!isDoubleSpacedVectorList()) return false;
    return VectorList.Count == 2;
  }

  bool isVecListOneDAllLanes() const {
    if (Kind != k_VectorListAllLanes) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoDAllLanes() const {
    if (Kind != k_VectorListAllLanes) return false;
    return VectorList.Count == 2;
  }

  bool isVecListOneDByteIndexed() const {
    if (Kind != k_VectorListIndexed) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
  }

  bool isVecListOneDHWordIndexed() const {
    if (Kind != k_VectorListIndexed) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 3;
  }

  bool isVecListOneDWordIndexed() const {
    if (Kind != k_VectorListIndexed) return false;
    return VectorList.Count == 1 && VectorList.LaneIndex <= 1;
  }

  bool isVecListTwoDByteIndexed() const {
    if (Kind != k_VectorListIndexed) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 7;
  }

  bool isVecListTwoDHWordIndexed() const {
    if (Kind != k_VectorListIndexed) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 3;
  }

  bool isVecListTwoDWordIndexed() const {
    if (Kind != k_VectorListIndexed) return false;
    return VectorList.Count == 2 && VectorList.LaneIndex <= 1;
  }

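  // Lane indices are bounded by the number of lanes of the given element size
  // in a 64-bit D register: 8 bytes, 4 halfwords, or 2 words.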
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }

  bool isNEONi8splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
    // value.
    return Value >= 0 && Value < 256;
  }

  bool isNEONi16splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  bool isNEONi32splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }

  bool isNEONi32vmov() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }
  bool isNEONi32vmovNeg() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = ~CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000) ||
      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
  }

  bool isNEONi64splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    uint64_t Value = CE->getValue();
    // i64 value with each byte being either 0 or 0xff.
    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
    return true;
  }

  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
    // Add as immediates when possible.  Null MCExpr = 0.
    if (Expr == 0)
      Inst.addOperand(MCOperand::CreateImm(0));
    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
    else
      Inst.addOperand(MCOperand::CreateExpr(Expr));
  }

  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
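    // AL (unconditional) has no CPSR dependence, so the predicate register
    // operand is 0 rather than CPSR.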
    unsigned RegNum = getCondCode() == ARMCC::AL ? 0 : ARM::CPSR;
    Inst.addOperand(MCOperand::CreateReg(RegNum));
  }

  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
  }

  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
  }

  void addITMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
  }

  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
  }

  void addCCOutOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(getReg()));
  }

  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    assert(isRegShiftedReg() &&
           "addRegShiftedRegOperands() on non RegShiftedReg!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
  }

  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    assert(isRegShiftedImm() &&
           "addRegShiftedImmOperands() on non RegShiftedImm!");
    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
    Inst.addOperand(MCOperand::CreateImm(
      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
  }

  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
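    // Bit 5 of the immediate distinguishes ASR from LSL; the low bits hold
    // the shift amount.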
    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
                                         ShifterImm.Imm));
  }

  void addRegListOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ++I)
      Inst.addOperand(MCOperand::CreateReg(*I));
  }

  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
    addRegListOperands(Inst, N);
  }

  void addRotImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Encoded as val>>3. The printer handles display as 8, 16, 24.
    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
  }

  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Munge the lsb/width into a bitfield mask.
    unsigned lsb = Bitfield.LSB;
    unsigned width = Bitfield.Width;
    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
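    // For example, lsb == 8 and width == 8 yield the mask 0xffff00ff.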
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instructions don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

1481  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1482    assert(N == 3 && "Invalid number of operands!");
1483    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1484    if (!Memory.OffsetRegNum) {
1485      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1486      // Special case for #-0
1487      if (Val == INT32_MIN) Val = 0;
1488      if (Val < 0) Val = -Val;
1489      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1490    } else {
1491      // For register offset, we encode the shift type and negation flag
1492      // here.
1493      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1494                              Memory.ShiftImm, Memory.ShiftType);
1495    }
1496    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1497    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1498    Inst.addOperand(MCOperand::CreateImm(Val));
1499  }
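  // A rough worked example of the mapping above: an operand written as
  // "[r1, #-8]" (no offset register) ends up as getAM2Opc(ARM_AM::sub, 8,
  // ARM_AM::no_shift), while "[r1, r2, lsl #2]" keeps r2 as the offset
  // register and encodes getAM2Opc(ARM_AM::add, 2, ARM_AM::lsl).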
1500
1501  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1502    assert(N == 2 && "Invalid number of operands!");
1503    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1504    assert(CE && "non-constant AM2OffsetImm operand!");
1505    int32_t Val = CE->getValue();
1506    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1507    // Special case for #-0
1508    if (Val == INT32_MIN) Val = 0;
1509    if (Val < 0) Val = -Val;
1510    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1511    Inst.addOperand(MCOperand::CreateReg(0));
1512    Inst.addOperand(MCOperand::CreateImm(Val));
1513  }
1514
1515  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1516    assert(N == 3 && "Invalid number of operands!");
1517    // If we have an immediate that's not a constant, treat it as a label
1518    // reference needing a fixup. If it is a constant, it's something else
1519    // and we reject it.
1520    if (isImm()) {
1521      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1522      Inst.addOperand(MCOperand::CreateReg(0));
1523      Inst.addOperand(MCOperand::CreateImm(0));
1524      return;
1525    }
1526
1527    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1528    if (!Memory.OffsetRegNum) {
1529      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1530      // Special case for #-0
1531      if (Val == INT32_MIN) Val = 0;
1532      if (Val < 0) Val = -Val;
1533      Val = ARM_AM::getAM3Opc(AddSub, Val);
1534    } else {
1535      // For register offset, we encode the shift type and negation flag
1536      // here.
1537      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1538    }
1539    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1540    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1541    Inst.addOperand(MCOperand::CreateImm(Val));
1542  }
1543
1544  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1545    assert(N == 2 && "Invalid number of operands!");
1546    if (Kind == k_PostIndexRegister) {
1547      int32_t Val =
1548        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1549      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1550      Inst.addOperand(MCOperand::CreateImm(Val));
1551      return;
1552    }
1553
1554    // Constant offset.
1555    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1556    int32_t Val = CE->getValue();
1557    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1558    // Special case for #-0
1559    if (Val == INT32_MIN) Val = 0;
1560    if (Val < 0) Val = -Val;
1561    Val = ARM_AM::getAM3Opc(AddSub, Val);
1562    Inst.addOperand(MCOperand::CreateReg(0));
1563    Inst.addOperand(MCOperand::CreateImm(Val));
1564  }
1565
1566  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1567    assert(N == 2 && "Invalid number of operands!");
1568    // If we have an immediate that's not a constant, treat it as a label
1569    // reference needing a fixup. If it is a constant, it's something else
1570    // and we reject it.
1571    if (isImm()) {
1572      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1573      Inst.addOperand(MCOperand::CreateImm(0));
1574      return;
1575    }
1576
1577    // The lower two bits are always zero and as such are not encoded.
1578    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1579    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1580    // Special case for #-0
1581    if (Val == INT32_MIN) Val = 0;
1582    if (Val < 0) Val = -Val;
1583    Val = ARM_AM::getAM5Opc(AddSub, Val);
1584    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1585    Inst.addOperand(MCOperand::CreateImm(Val));
1586  }
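  // Rough sketch of the scaling above: "[r3, #-16]" arrives with an offset of
  // -16, which becomes getAM5Opc(ARM_AM::sub, 4) after the divide-by-4, and a
  // bare "[r3]" encodes as getAM5Opc(ARM_AM::add, 0).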
1587
1588  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1589    assert(N == 2 && "Invalid number of operands!");
1590    // If we have an immediate that's not a constant, treat it as a label
1591    // reference needing a fixup. If it is a constant, it's something else
1592    // and we reject it.
1593    if (isImm()) {
1594      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1595      Inst.addOperand(MCOperand::CreateImm(0));
1596      return;
1597    }
1598
1599    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1600    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1601    Inst.addOperand(MCOperand::CreateImm(Val));
1602  }
1603
1604  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1605    assert(N == 2 && "Invalid number of operands!");
1606    // The lower two bits are always zero and as such are not encoded.
1607    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1608    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1609    Inst.addOperand(MCOperand::CreateImm(Val));
1610  }
1611
1612  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1613    assert(N == 2 && "Invalid number of operands!");
1614    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1615    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1616    Inst.addOperand(MCOperand::CreateImm(Val));
1617  }
1618
1619  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1620    addMemImm8OffsetOperands(Inst, N);
1621  }
1622
1623  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1624    addMemImm8OffsetOperands(Inst, N);
1625  }
1626
1627  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1628    assert(N == 2 && "Invalid number of operands!");
1629    // If this is an immediate, it's a label reference.
1630    if (Kind == k_Immediate) {
1631      addExpr(Inst, getImm());
1632      Inst.addOperand(MCOperand::CreateImm(0));
1633      return;
1634    }
1635
1636    // Otherwise, it's a normal memory reg+offset.
1637    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1638    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1639    Inst.addOperand(MCOperand::CreateImm(Val));
1640  }
1641
1642  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1643    assert(N == 2 && "Invalid number of operands!");
1644    // If this is an immediate, it's a label reference.
1645    if (Kind == k_Immediate) {
1646      addExpr(Inst, getImm());
1647      Inst.addOperand(MCOperand::CreateImm(0));
1648      return;
1649    }
1650
1651    // Otherwise, it's a normal memory reg+offset.
1652    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1653    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1654    Inst.addOperand(MCOperand::CreateImm(Val));
1655  }
1656
1657  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1658    assert(N == 2 && "Invalid number of operands!");
1659    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1660    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1661  }
1662
1663  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1664    assert(N == 2 && "Invalid number of operands!");
1665    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1666    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1667  }
1668
1669  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1670    assert(N == 3 && "Invalid number of operands!");
1671    unsigned Val =
1672      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1673                        Memory.ShiftImm, Memory.ShiftType);
1674    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1675    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1676    Inst.addOperand(MCOperand::CreateImm(Val));
1677  }
1678
1679  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1680    assert(N == 3 && "Invalid number of operands!");
1681    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1682    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1683    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1684  }
1685
1686  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1687    assert(N == 2 && "Invalid number of operands!");
1688    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1689    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1690  }
1691
1692  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1693    assert(N == 2 && "Invalid number of operands!");
1694    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1695    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1696    Inst.addOperand(MCOperand::CreateImm(Val));
1697  }
1698
1699  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1700    assert(N == 2 && "Invalid number of operands!");
1701    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1702    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1703    Inst.addOperand(MCOperand::CreateImm(Val));
1704  }
1705
1706  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1707    assert(N == 2 && "Invalid number of operands!");
1708    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1709    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1710    Inst.addOperand(MCOperand::CreateImm(Val));
1711  }
1712
1713  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1714    assert(N == 2 && "Invalid number of operands!");
1715    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1716    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1717    Inst.addOperand(MCOperand::CreateImm(Val));
1718  }
1719
1720  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1721    assert(N == 1 && "Invalid number of operands!");
1722    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1723    assert(CE && "non-constant post-idx-imm8 operand!");
1724    int Imm = CE->getValue();
1725    bool isAdd = Imm >= 0;
1726    if (Imm == INT32_MIN) Imm = 0;
1727    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1728    Inst.addOperand(MCOperand::CreateImm(Imm));
1729  }
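  // Rough sketch: a post-indexed "#4" encodes as 4 | (1 << 8) = 0x104, "#-4"
  // keeps only the magnitude 4 with the add bit clear, and "#-0" (which the
  // parser appears to represent as INT32_MIN, as in the addressing-mode
  // helpers above) encodes as 0 with the add bit clear.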
1730
1731  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1732    assert(N == 1 && "Invalid number of operands!");
1733    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1734    assert(CE && "non-constant post-idx-imm8s4 operand!");
1735    int Imm = CE->getValue();
1736    bool isAdd = Imm >= 0;
1737    if (Imm == INT32_MIN) Imm = 0;
1738    // Immediate is scaled by 4.
1739    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1740    Inst.addOperand(MCOperand::CreateImm(Imm));
1741  }
1742
1743  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1744    assert(N == 2 && "Invalid number of operands!");
1745    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1746    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1747  }
1748
1749  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1750    assert(N == 2 && "Invalid number of operands!");
1751    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1752    // The sign, shift type, and shift amount are encoded in a single operand
1753    // using the AM2 encoding helpers.
1754    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1755    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1756                                     PostIdxReg.ShiftTy);
1757    Inst.addOperand(MCOperand::CreateImm(Imm));
1758  }
1759
1760  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1761    assert(N == 1 && "Invalid number of operands!");
1762    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1763  }
1764
1765  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1766    assert(N == 1 && "Invalid number of operands!");
1767    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1768  }
1769
1770  void addVecListOperands(MCInst &Inst, unsigned N) const {
1771    assert(N == 1 && "Invalid number of operands!");
1772    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1773  }
1774
1775  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1776    assert(N == 2 && "Invalid number of operands!");
1777    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1778    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1779  }
1780
1781  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1782    assert(N == 1 && "Invalid number of operands!");
1783    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1784  }
1785
1786  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1787    assert(N == 1 && "Invalid number of operands!");
1788    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1789  }
1790
1791  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1792    assert(N == 1 && "Invalid number of operands!");
1793    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1794  }
1795
1796  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1797    assert(N == 1 && "Invalid number of operands!");
1798    // The immediate encodes the type of constant as well as the value.
1799    // Mask in that this is an i8 splat.
1800    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1801    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1802  }
1803
1804  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1805    assert(N == 1 && "Invalid number of operands!");
1806    // The immediate encodes the type of constant as well as the value.
1807    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1808    unsigned Value = CE->getValue();
1809    if (Value >= 256)
1810      Value = (Value >> 8) | 0xa00;
1811    else
1812      Value |= 0x800;
1813    Inst.addOperand(MCOperand::CreateImm(Value));
1814  }
1815
1816  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1817    assert(N == 1 && "Invalid number of operands!");
1818    // The immediate encodes the type of constant as well as the value.
1819    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1820    unsigned Value = CE->getValue();
1821    if (Value >= 256 && Value <= 0xff00)
1822      Value = (Value >> 8) | 0x200;
1823    else if (Value > 0xffff && Value <= 0xff0000)
1824      Value = (Value >> 16) | 0x400;
1825    else if (Value > 0xffffff)
1826      Value = (Value >> 24) | 0x600;
1827    Inst.addOperand(MCOperand::CreateImm(Value));
1828  }
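  // Rough mapping for the i32 splat case: 0xab stays 0xab, 0xab00 becomes
  // 0x2ab, 0xab0000 becomes 0x4ab, and 0xab000000 becomes 0x6ab; the OR'd-in
  // high bits record which byte of the 32-bit element carries the value.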
1829
1830  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1831    assert(N == 1 && "Invalid number of operands!");
1832    // The immediate encodes the type of constant as well as the value.
1833    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1834    unsigned Value = CE->getValue();
1835    if (Value >= 256 && Value <= 0xffff)
1836      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1837    else if (Value > 0xffff && Value <= 0xffffff)
1838      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1839    else if (Value > 0xffffff)
1840      Value = (Value >> 24) | 0x600;
1841    Inst.addOperand(MCOperand::CreateImm(Value));
1842  }
1843
1844  void addNEONi32vmovNegOperands(MCInst &Inst, unsigned N) const {
1845    assert(N == 1 && "Invalid number of operands!");
1846    // The immediate encodes the type of constant as well as the value.
1847    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1848    unsigned Value = ~CE->getValue();
1849    if (Value >= 256 && Value <= 0xffff)
1850      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1851    else if (Value > 0xffff && Value <= 0xffffff)
1852      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1853    else if (Value > 0xffffff)
1854      Value = (Value >> 24) | 0x600;
1855    Inst.addOperand(MCOperand::CreateImm(Value));
1856  }
1857
1858  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1859    assert(N == 1 && "Invalid number of operands!");
1860    // The immediate encodes the type of constant as well as the value.
1861    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1862    uint64_t Value = CE->getValue();
1863    unsigned Imm = 0;
1864    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1865      Imm |= (Value & 1) << i;
1866    }
1867    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1868  }
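  // Rough sketch, assuming the usual i64 splat constraint (checked elsewhere)
  // that every byte is 0x00 or 0xff: bit i of the encoded immediate mirrors
  // byte i, so 0x00ff00ff00ff00ff yields Imm = 0x55 and the operand 0x1e55.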
1869
1870  virtual void print(raw_ostream &OS) const;
1871
1872  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1873    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1874    Op->ITMask.Mask = Mask;
1875    Op->StartLoc = S;
1876    Op->EndLoc = S;
1877    return Op;
1878  }
1879
1880  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1881    ARMOperand *Op = new ARMOperand(k_CondCode);
1882    Op->CC.Val = CC;
1883    Op->StartLoc = S;
1884    Op->EndLoc = S;
1885    return Op;
1886  }
1887
1888  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1889    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1890    Op->Cop.Val = CopVal;
1891    Op->StartLoc = S;
1892    Op->EndLoc = S;
1893    return Op;
1894  }
1895
1896  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1897    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1898    Op->Cop.Val = CopVal;
1899    Op->StartLoc = S;
1900    Op->EndLoc = S;
1901    return Op;
1902  }
1903
1904  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1905    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1906    Op->Cop.Val = Val;
1907    Op->StartLoc = S;
1908    Op->EndLoc = E;
1909    return Op;
1910  }
1911
1912  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1913    ARMOperand *Op = new ARMOperand(k_CCOut);
1914    Op->Reg.RegNum = RegNum;
1915    Op->StartLoc = S;
1916    Op->EndLoc = S;
1917    return Op;
1918  }
1919
1920  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1921    ARMOperand *Op = new ARMOperand(k_Token);
1922    Op->Tok.Data = Str.data();
1923    Op->Tok.Length = Str.size();
1924    Op->StartLoc = S;
1925    Op->EndLoc = S;
1926    return Op;
1927  }
1928
1929  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1930    ARMOperand *Op = new ARMOperand(k_Register);
1931    Op->Reg.RegNum = RegNum;
1932    Op->StartLoc = S;
1933    Op->EndLoc = E;
1934    return Op;
1935  }
1936
1937  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1938                                           unsigned SrcReg,
1939                                           unsigned ShiftReg,
1940                                           unsigned ShiftImm,
1941                                           SMLoc S, SMLoc E) {
1942    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1943    Op->RegShiftedReg.ShiftTy = ShTy;
1944    Op->RegShiftedReg.SrcReg = SrcReg;
1945    Op->RegShiftedReg.ShiftReg = ShiftReg;
1946    Op->RegShiftedReg.ShiftImm = ShiftImm;
1947    Op->StartLoc = S;
1948    Op->EndLoc = E;
1949    return Op;
1950  }
1951
1952  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1953                                            unsigned SrcReg,
1954                                            unsigned ShiftImm,
1955                                            SMLoc S, SMLoc E) {
1956    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1957    Op->RegShiftedImm.ShiftTy = ShTy;
1958    Op->RegShiftedImm.SrcReg = SrcReg;
1959    Op->RegShiftedImm.ShiftImm = ShiftImm;
1960    Op->StartLoc = S;
1961    Op->EndLoc = E;
1962    return Op;
1963  }
1964
1965  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1966                                   SMLoc S, SMLoc E) {
1967    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1968    Op->ShifterImm.isASR = isASR;
1969    Op->ShifterImm.Imm = Imm;
1970    Op->StartLoc = S;
1971    Op->EndLoc = E;
1972    return Op;
1973  }
1974
1975  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1976    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1977    Op->RotImm.Imm = Imm;
1978    Op->StartLoc = S;
1979    Op->EndLoc = E;
1980    return Op;
1981  }
1982
1983  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1984                                    SMLoc S, SMLoc E) {
1985    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1986    Op->Bitfield.LSB = LSB;
1987    Op->Bitfield.Width = Width;
1988    Op->StartLoc = S;
1989    Op->EndLoc = E;
1990    return Op;
1991  }
1992
1993  static ARMOperand *
1994  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1995                SMLoc StartLoc, SMLoc EndLoc) {
1996    KindTy Kind = k_RegisterList;
1997
1998    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1999      Kind = k_DPRRegisterList;
2000    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
2001             contains(Regs.front().first))
2002      Kind = k_SPRRegisterList;
2003
2004    ARMOperand *Op = new ARMOperand(Kind);
2005    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
2006           I = Regs.begin(), E = Regs.end(); I != E; ++I)
2007      Op->Registers.push_back(I->first);
2008    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
2009    Op->StartLoc = StartLoc;
2010    Op->EndLoc = EndLoc;
2011    return Op;
2012  }
2013
2014  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
2015                                      bool isDoubleSpaced, SMLoc S, SMLoc E) {
2016    ARMOperand *Op = new ARMOperand(k_VectorList);
2017    Op->VectorList.RegNum = RegNum;
2018    Op->VectorList.Count = Count;
2019    Op->VectorList.isDoubleSpaced = isDoubleSpaced;
2020    Op->StartLoc = S;
2021    Op->EndLoc = E;
2022    return Op;
2023  }
2024
2025  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
2026                                              SMLoc S, SMLoc E) {
2027    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
2028    Op->VectorList.RegNum = RegNum;
2029    Op->VectorList.Count = Count;
2030    Op->StartLoc = S;
2031    Op->EndLoc = E;
2032    return Op;
2033  }
2034
2035  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
2036                                             unsigned Index, SMLoc S, SMLoc E) {
2037    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
2038    Op->VectorList.RegNum = RegNum;
2039    Op->VectorList.Count = Count;
2040    Op->VectorList.LaneIndex = Index;
2041    Op->StartLoc = S;
2042    Op->EndLoc = E;
2043    return Op;
2044  }
2045
2046  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
2047                                       MCContext &Ctx) {
2048    ARMOperand *Op = new ARMOperand(k_VectorIndex);
2049    Op->VectorIndex.Val = Idx;
2050    Op->StartLoc = S;
2051    Op->EndLoc = E;
2052    return Op;
2053  }
2054
2055  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
2056    ARMOperand *Op = new ARMOperand(k_Immediate);
2057    Op->Imm.Val = Val;
2058    Op->StartLoc = S;
2059    Op->EndLoc = E;
2060    return Op;
2061  }
2062
2063  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
2064    ARMOperand *Op = new ARMOperand(k_FPImmediate);
2065    Op->FPImm.Val = Val;
2066    Op->StartLoc = S;
2067    Op->EndLoc = S;
2068    return Op;
2069  }
2070
2071  static ARMOperand *CreateMem(unsigned BaseRegNum,
2072                               const MCConstantExpr *OffsetImm,
2073                               unsigned OffsetRegNum,
2074                               ARM_AM::ShiftOpc ShiftType,
2075                               unsigned ShiftImm,
2076                               unsigned Alignment,
2077                               bool isNegative,
2078                               SMLoc S, SMLoc E) {
2079    ARMOperand *Op = new ARMOperand(k_Memory);
2080    Op->Memory.BaseRegNum = BaseRegNum;
2081    Op->Memory.OffsetImm = OffsetImm;
2082    Op->Memory.OffsetRegNum = OffsetRegNum;
2083    Op->Memory.ShiftType = ShiftType;
2084    Op->Memory.ShiftImm = ShiftImm;
2085    Op->Memory.Alignment = Alignment;
2086    Op->Memory.isNegative = isNegative;
2087    Op->StartLoc = S;
2088    Op->EndLoc = E;
2089    return Op;
2090  }
2091
2092  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
2093                                      ARM_AM::ShiftOpc ShiftTy,
2094                                      unsigned ShiftImm,
2095                                      SMLoc S, SMLoc E) {
2096    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
2097    Op->PostIdxReg.RegNum = RegNum;
2098    Op->PostIdxReg.isAdd = isAdd;
2099    Op->PostIdxReg.ShiftTy = ShiftTy;
2100    Op->PostIdxReg.ShiftImm = ShiftImm;
2101    Op->StartLoc = S;
2102    Op->EndLoc = E;
2103    return Op;
2104  }
2105
2106  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
2107    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
2108    Op->MBOpt.Val = Opt;
2109    Op->StartLoc = S;
2110    Op->EndLoc = S;
2111    return Op;
2112  }
2113
2114  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
2115    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
2116    Op->IFlags.Val = IFlags;
2117    Op->StartLoc = S;
2118    Op->EndLoc = S;
2119    return Op;
2120  }
2121
2122  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
2123    ARMOperand *Op = new ARMOperand(k_MSRMask);
2124    Op->MMask.Val = MMask;
2125    Op->StartLoc = S;
2126    Op->EndLoc = S;
2127    return Op;
2128  }
2129};
2130
2131} // end anonymous namespace.
2132
2133void ARMOperand::print(raw_ostream &OS) const {
2134  switch (Kind) {
2135  case k_FPImmediate:
2136    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2137       << ") >";
2138    break;
2139  case k_CondCode:
2140    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2141    break;
2142  case k_CCOut:
2143    OS << "<ccout " << getReg() << ">";
2144    break;
2145  case k_ITCondMask: {
2146    static const char *MaskStr[] = {
2147      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2148      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2149    };
2150    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2151    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2152    break;
2153  }
2154  case k_CoprocNum:
2155    OS << "<coprocessor number: " << getCoproc() << ">";
2156    break;
2157  case k_CoprocReg:
2158    OS << "<coprocessor register: " << getCoproc() << ">";
2159    break;
2160  case k_CoprocOption:
2161    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2162    break;
2163  case k_MSRMask:
2164    OS << "<mask: " << getMSRMask() << ">";
2165    break;
2166  case k_Immediate:
2167    getImm()->print(OS);
2168    break;
2169  case k_MemBarrierOpt:
2170    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2171    break;
2172  case k_Memory:
2173    OS << "<memory "
2174       << " base:" << Memory.BaseRegNum;
2175    OS << ">";
2176    break;
2177  case k_PostIndexRegister:
2178    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2179       << PostIdxReg.RegNum;
2180    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2181      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2182         << PostIdxReg.ShiftImm;
2183    OS << ">";
2184    break;
2185  case k_ProcIFlags: {
2186    OS << "<ARM_PROC::";
2187    unsigned IFlags = getProcIFlags();
2188    for (int i=2; i >= 0; --i)
2189      if (IFlags & (1 << i))
2190        OS << ARM_PROC::IFlagsToString(1 << i);
2191    OS << ">";
2192    break;
2193  }
2194  case k_Register:
2195    OS << "<register " << getReg() << ">";
2196    break;
2197  case k_ShifterImmediate:
2198    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2199       << " #" << ShifterImm.Imm << ">";
2200    break;
2201  case k_ShiftedRegister:
2202    OS << "<so_reg_reg "
2203       << RegShiftedReg.SrcReg << " "
2204       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2205       << " " << RegShiftedReg.ShiftReg << ">";
2206    break;
2207  case k_ShiftedImmediate:
2208    OS << "<so_reg_imm "
2209       << RegShiftedImm.SrcReg << " "
2210       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2211       << " #" << RegShiftedImm.ShiftImm << ">";
2212    break;
2213  case k_RotateImmediate:
2214    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
2215    break;
2216  case k_BitfieldDescriptor:
2217    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2218       << ", width: " << Bitfield.Width << ">";
2219    break;
2220  case k_RegisterList:
2221  case k_DPRRegisterList:
2222  case k_SPRRegisterList: {
2223    OS << "<register_list ";
2224
2225    const SmallVectorImpl<unsigned> &RegList = getRegList();
2226    for (SmallVectorImpl<unsigned>::const_iterator
2227           I = RegList.begin(), E = RegList.end(); I != E; ) {
2228      OS << *I;
2229      if (++I < E) OS << ", ";
2230    }
2231
2232    OS << ">";
2233    break;
2234  }
2235  case k_VectorList:
2236    OS << "<vector_list " << VectorList.Count << " * "
2237       << VectorList.RegNum << ">";
2238    break;
2239  case k_VectorListAllLanes:
2240    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2241       << VectorList.RegNum << ">";
2242    break;
2243  case k_VectorListIndexed:
2244    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2245       << VectorList.Count << " * " << VectorList.RegNum << ">";
2246    break;
2247  case k_Token:
2248    OS << "'" << getToken() << "'";
2249    break;
2250  case k_VectorIndex:
2251    OS << "<vectorindex " << getVectorIndex() << ">";
2252    break;
2253  }
2254}
2255
2256/// @name Auto-generated Match Functions
2257/// {
2258
2259static unsigned MatchRegisterName(StringRef Name);
2260
2261/// }
2262
2263bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2264                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2265  StartLoc = Parser.getTok().getLoc();
2266  RegNo = tryParseRegister();
2267  EndLoc = Parser.getTok().getLoc();
2268
2269  return (RegNo == (unsigned)-1);
2270}
2271
2272/// Try to parse a register name.  The token must be an Identifier when called,
2273/// and if it is a register name the token is eaten and the register number is
2274/// returned.  Otherwise return -1.
2275///
2276int ARMAsmParser::tryParseRegister() {
2277  const AsmToken &Tok = Parser.getTok();
2278  if (Tok.isNot(AsmToken::Identifier)) return -1;
2279
2280  std::string lowerCase = Tok.getString().lower();
2281  unsigned RegNum = MatchRegisterName(lowerCase);
2282  if (!RegNum) {
2283    RegNum = StringSwitch<unsigned>(lowerCase)
2284      .Case("r13", ARM::SP)
2285      .Case("r14", ARM::LR)
2286      .Case("r15", ARM::PC)
2287      .Case("ip", ARM::R12)
2288      // Additional register name aliases for 'gas' compatibility.
2289      .Case("a1", ARM::R0)
2290      .Case("a2", ARM::R1)
2291      .Case("a3", ARM::R2)
2292      .Case("a4", ARM::R3)
2293      .Case("v1", ARM::R4)
2294      .Case("v2", ARM::R5)
2295      .Case("v3", ARM::R6)
2296      .Case("v4", ARM::R7)
2297      .Case("v5", ARM::R8)
2298      .Case("v6", ARM::R9)
2299      .Case("v7", ARM::R10)
2300      .Case("v8", ARM::R11)
2301      .Case("sb", ARM::R9)
2302      .Case("sl", ARM::R10)
2303      .Case("fp", ARM::R11)
2304      .Default(0);
2305  }
2306  if (!RegNum) {
2307    // Check for aliases registered via .req.
2308    StringMap<unsigned>::const_iterator Entry =
2309      RegisterReqs.find(Tok.getIdentifier());
2310    // If no match, return failure.
2311    if (Entry == RegisterReqs.end())
2312      return -1;
2313    Parser.Lex(); // Eat identifier token.
2314    return Entry->getValue();
2315  }
2316
2317  Parser.Lex(); // Eat identifier token.
2318
2319  return RegNum;
2320}
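// For example, "r7" resolves through MatchRegisterName, "fp" maps to ARM::R11
// via the gas-compatibility aliases above, and a name recorded in RegisterReqs
// (registered by a ".req" directive, handled elsewhere) resolves to whatever
// register it aliases; anything else returns -1 and the token is left
// unconsumed.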
2321
2322// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2323// If a recoverable error occurs, return 1. If an irrecoverable error
2324// occurs, return -1. An irrecoverable error is one where tokens have been
2325// consumed in the process of trying to parse the shifter (i.e., when it is
2326// indeed a shifter operand, but malformed).
2327int ARMAsmParser::tryParseShiftRegister(
2328                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2329  SMLoc S = Parser.getTok().getLoc();
2330  const AsmToken &Tok = Parser.getTok();
2331  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2332
2333  std::string lowerCase = Tok.getString().lower();
2334  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2335      .Case("asl", ARM_AM::lsl)
2336      .Case("lsl", ARM_AM::lsl)
2337      .Case("lsr", ARM_AM::lsr)
2338      .Case("asr", ARM_AM::asr)
2339      .Case("ror", ARM_AM::ror)
2340      .Case("rrx", ARM_AM::rrx)
2341      .Default(ARM_AM::no_shift);
2342
2343  if (ShiftTy == ARM_AM::no_shift)
2344    return 1;
2345
2346  Parser.Lex(); // Eat the operator.
2347
2348  // The source register for the shift has already been added to the
2349  // operand list, so we need to pop it off and combine it into the shifted
2350  // register operand instead.
2351  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2352  if (!PrevOp->isReg())
2353    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2354  int SrcReg = PrevOp->getReg();
2355  int64_t Imm = 0;
2356  int ShiftReg = 0;
2357  if (ShiftTy == ARM_AM::rrx) {
2358    // RRX doesn't have an explicit shift amount. The encoder expects
2359    // the shift register to be the same as the source register. Seems odd,
2360    // but OK.
2361    ShiftReg = SrcReg;
2362  } else {
2363    // Figure out if this is shifted by a constant or a register (for non-RRX).
2364    if (Parser.getTok().is(AsmToken::Hash) ||
2365        Parser.getTok().is(AsmToken::Dollar)) {
2366      Parser.Lex(); // Eat hash.
2367      SMLoc ImmLoc = Parser.getTok().getLoc();
2368      const MCExpr *ShiftExpr = 0;
2369      if (getParser().ParseExpression(ShiftExpr)) {
2370        Error(ImmLoc, "invalid immediate shift value");
2371        return -1;
2372      }
2373      // The expression must be evaluatable as an immediate.
2374      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2375      if (!CE) {
2376        Error(ImmLoc, "invalid immediate shift value");
2377        return -1;
2378      }
2379      // Range check the immediate.
2380      // lsl, ror: 0 <= imm <= 31
2381      // lsr, asr: 0 <= imm <= 32
2382      Imm = CE->getValue();
2383      if (Imm < 0 ||
2384          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2385          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2386        Error(ImmLoc, "immediate shift value out of range");
2387        return -1;
2388      }
2389    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2390      ShiftReg = tryParseRegister();
2391      SMLoc L = Parser.getTok().getLoc();
2392      if (ShiftReg == -1) {
2393        Error(L, "expected immediate or register in shift operand");
2394        return -1;
2395      }
2396    } else {
2397      Error(Parser.getTok().getLoc(),
2398            "expected immediate or register in shift operand");
2399      return -1;
2400    }
2401  }
2402
2403  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2404    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2405                                                         ShiftReg, Imm,
2406                                               S, Parser.getTok().getLoc()));
2407  else
2408    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2409                                               S, Parser.getTok().getLoc()));
2410
2411  return 0;
2412}
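// A rough sketch of the rewriting: for "add r0, r1, r2, lsl #3" the r2 operand
// already pushed by the caller is popped and replaced with a
// k_ShiftedImmediate operand (lsl, r2, #3); "r2, lsl r3" produces a
// k_ShiftedRegister operand instead, and "r2, rrx", which takes no amount, is
// emitted as a shifted-immediate with an amount of 0.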
2413
2414
2415/// Try to parse a register name.  The token must be an Identifier when called.
2416/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2417/// if there is a "writeback". Returns 'true' if it's not a register.
2418///
2419/// TODO this is likely to change to allow different register types and/or to
2420/// parse for a specific register type.
2421bool ARMAsmParser::
2422tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2423  SMLoc S = Parser.getTok().getLoc();
2424  int RegNo = tryParseRegister();
2425  if (RegNo == -1)
2426    return true;
2427
2428  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2429
2430  const AsmToken &ExclaimTok = Parser.getTok();
2431  if (ExclaimTok.is(AsmToken::Exclaim)) {
2432    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2433                                               ExclaimTok.getLoc()));
2434    Parser.Lex(); // Eat exclaim token
2435    return false;
2436  }
2437
2438  // Also check for an index operand. This is only legal for vector registers,
2439  // but that'll get caught OK in operand matching, so we don't need to
2440  // explicitly filter everything else out here.
2441  if (Parser.getTok().is(AsmToken::LBrac)) {
2442    SMLoc SIdx = Parser.getTok().getLoc();
2443    Parser.Lex(); // Eat left bracket token.
2444
2445    const MCExpr *ImmVal;
2446    if (getParser().ParseExpression(ImmVal))
2447      return true;
2448    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2449    if (!MCE) {
2450      TokError("immediate value expected for vector index");
2451      return true;
2452    }
2453
2454    SMLoc E = Parser.getTok().getLoc();
2455    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2456      Error(E, "']' expected");
2457      return true;
2458    }
2459
2460    Parser.Lex(); // Eat right bracket token.
2461
2462    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2463                                                     SIdx, E,
2464                                                     getContext()));
2465  }
2466
2467  return false;
2468}
2469
2470/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2471/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2472/// "c5", ...
2473static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2474  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2475  // but efficient.
2476  switch (Name.size()) {
2477  default: break;
2478  case 2:
2479    if (Name[0] != CoprocOp)
2480      return -1;
2481    switch (Name[1]) {
2482    default:  return -1;
2483    case '0': return 0;
2484    case '1': return 1;
2485    case '2': return 2;
2486    case '3': return 3;
2487    case '4': return 4;
2488    case '5': return 5;
2489    case '6': return 6;
2490    case '7': return 7;
2491    case '8': return 8;
2492    case '9': return 9;
2493    }
2494    break;
2495  case 3:
2496    if (Name[0] != CoprocOp || Name[1] != '1')
2497      return -1;
2498    switch (Name[2]) {
2499    default:  return -1;
2500    case '0': return 10;
2501    case '1': return 11;
2502    case '2': return 12;
2503    case '3': return 13;
2504    case '4': return 14;
2505    case '5': return 15;
2506    }
2507    break;
2508  }
2509
2510  return -1;
2511}
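// For example, MatchCoprocessorOperandName("p7", 'p') returns 7 and
// MatchCoprocessorOperandName("c15", 'c') returns 15, while a wrong prefix or
// an out-of-range number such as "p16" returns -1.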
2512
2513/// parseITCondCode - Try to parse a condition code for an IT instruction.
2514ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2515parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2516  SMLoc S = Parser.getTok().getLoc();
2517  const AsmToken &Tok = Parser.getTok();
2518  if (!Tok.is(AsmToken::Identifier))
2519    return MatchOperand_NoMatch;
2520  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2521    .Case("eq", ARMCC::EQ)
2522    .Case("ne", ARMCC::NE)
2523    .Case("hs", ARMCC::HS)
2524    .Case("cs", ARMCC::HS)
2525    .Case("lo", ARMCC::LO)
2526    .Case("cc", ARMCC::LO)
2527    .Case("mi", ARMCC::MI)
2528    .Case("pl", ARMCC::PL)
2529    .Case("vs", ARMCC::VS)
2530    .Case("vc", ARMCC::VC)
2531    .Case("hi", ARMCC::HI)
2532    .Case("ls", ARMCC::LS)
2533    .Case("ge", ARMCC::GE)
2534    .Case("lt", ARMCC::LT)
2535    .Case("gt", ARMCC::GT)
2536    .Case("le", ARMCC::LE)
2537    .Case("al", ARMCC::AL)
2538    .Default(~0U);
2539  if (CC == ~0U)
2540    return MatchOperand_NoMatch;
2541  Parser.Lex(); // Eat the token.
2542
2543  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2544
2545  return MatchOperand_Success;
2546}
2547
2548/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2549/// token must be an Identifier when called, and if it is a coprocessor
2550/// number, the token is eaten and the operand is added to the operand list.
2551ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2552parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2553  SMLoc S = Parser.getTok().getLoc();
2554  const AsmToken &Tok = Parser.getTok();
2555  if (Tok.isNot(AsmToken::Identifier))
2556    return MatchOperand_NoMatch;
2557
2558  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2559  if (Num == -1)
2560    return MatchOperand_NoMatch;
2561
2562  Parser.Lex(); // Eat identifier token.
2563  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2564  return MatchOperand_Success;
2565}
2566
2567/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2568/// token must be an Identifier when called, and if it is a coprocessor
2569/// register, the token is eaten and the operand is added to the operand list.
2570ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2571parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2572  SMLoc S = Parser.getTok().getLoc();
2573  const AsmToken &Tok = Parser.getTok();
2574  if (Tok.isNot(AsmToken::Identifier))
2575    return MatchOperand_NoMatch;
2576
2577  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2578  if (Reg == -1)
2579    return MatchOperand_NoMatch;
2580
2581  Parser.Lex(); // Eat identifier token.
2582  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2583  return MatchOperand_Success;
2584}
2585
2586/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2587/// coproc_option : '{' imm0_255 '}'
2588ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2589parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2590  SMLoc S = Parser.getTok().getLoc();
2591
2592  // If this isn't a '{', this isn't a coprocessor immediate operand.
2593  if (Parser.getTok().isNot(AsmToken::LCurly))
2594    return MatchOperand_NoMatch;
2595  Parser.Lex(); // Eat the '{'
2596
2597  const MCExpr *Expr;
2598  SMLoc Loc = Parser.getTok().getLoc();
2599  if (getParser().ParseExpression(Expr)) {
2600    Error(Loc, "illegal expression");
2601    return MatchOperand_ParseFail;
2602  }
2603  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2604  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2605    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2606    return MatchOperand_ParseFail;
2607  }
2608  int Val = CE->getValue();
2609
2610  // Check for and consume the closing '}'
2611  if (Parser.getTok().isNot(AsmToken::RCurly))
2612    return MatchOperand_ParseFail;
2613  SMLoc E = Parser.getTok().getLoc();
2614  Parser.Lex(); // Eat the '}'
2615
2616  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2617  return MatchOperand_Success;
2618}
2619
2620// For register list parsing, we need to map from raw GPR register numbering
2621// to the enumeration values. The enumeration values aren't sorted by
2622// register number due to our using "sp", "lr" and "pc" as canonical names.
2623static unsigned getNextRegister(unsigned Reg) {
2624  // If this is a GPR, we need to do it manually, otherwise we can rely
2625  // on the sort ordering of the enumeration since the other reg-classes
2626  // are sane.
2627  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2628    return Reg + 1;
2629  switch(Reg) {
2630  default: assert(0 && "Invalid GPR number!");
2631  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2632  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2633  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2634  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2635  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2636  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2637  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2638  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2639  }
2640}
2641
2642// Return the low-subreg of a given Q register.
2643static unsigned getDRegFromQReg(unsigned QReg) {
2644  switch (QReg) {
2645  default: llvm_unreachable("expected a Q register!");
2646  case ARM::Q0:  return ARM::D0;
2647  case ARM::Q1:  return ARM::D2;
2648  case ARM::Q2:  return ARM::D4;
2649  case ARM::Q3:  return ARM::D6;
2650  case ARM::Q4:  return ARM::D8;
2651  case ARM::Q5:  return ARM::D10;
2652  case ARM::Q6:  return ARM::D12;
2653  case ARM::Q7:  return ARM::D14;
2654  case ARM::Q8:  return ARM::D16;
2655  case ARM::Q9:  return ARM::D18;
2656  case ARM::Q10: return ARM::D20;
2657  case ARM::Q11: return ARM::D22;
2658  case ARM::Q12: return ARM::D24;
2659  case ARM::Q13: return ARM::D26;
2660  case ARM::Q14: return ARM::D28;
2661  case ARM::Q15: return ARM::D30;
2662  }
2663}
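// That is, Qn maps to D(2n): Q3 becomes D6, so a list written as "{q3}" is
// handled by the register-list parsers below as d6 followed by d7.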
2664
2665/// Parse a register list.
2666bool ARMAsmParser::
2667parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2668  assert(Parser.getTok().is(AsmToken::LCurly) &&
2669         "Token is not a Left Curly Brace");
2670  SMLoc S = Parser.getTok().getLoc();
2671  Parser.Lex(); // Eat '{' token.
2672  SMLoc RegLoc = Parser.getTok().getLoc();
2673
2674  // Check the first register in the list to see what register class
2675  // this is a list of.
2676  int Reg = tryParseRegister();
2677  if (Reg == -1)
2678    return Error(RegLoc, "register expected");
2679
2680  // The reglist instructions have at most 16 registers, so reserve
2681  // space for that many.
2682  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2683
2684  // Allow Q regs and just interpret them as the two D sub-registers.
2685  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2686    Reg = getDRegFromQReg(Reg);
2687    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2688    ++Reg;
2689  }
2690  const MCRegisterClass *RC;
2691  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2692    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2693  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2694    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2695  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2696    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2697  else
2698    return Error(RegLoc, "invalid register in register list");
2699
2700  // Store the register.
2701  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2702
2703  // This starts immediately after the first register token in the list,
2704  // so we can see either a comma or a minus (range separator) as a legal
2705  // next token.
2706  while (Parser.getTok().is(AsmToken::Comma) ||
2707         Parser.getTok().is(AsmToken::Minus)) {
2708    if (Parser.getTok().is(AsmToken::Minus)) {
2709      Parser.Lex(); // Eat the minus.
2710      SMLoc EndLoc = Parser.getTok().getLoc();
2711      int EndReg = tryParseRegister();
2712      if (EndReg == -1)
2713        return Error(EndLoc, "register expected");
2714      // Allow Q regs and just interpret them as the two D sub-registers.
2715      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2716        EndReg = getDRegFromQReg(EndReg) + 1;
2717      // If the register is the same as the start reg, there's nothing
2718      // more to do.
2719      if (Reg == EndReg)
2720        continue;
2721      // The register must be in the same register class as the first.
2722      if (!RC->contains(EndReg))
2723        return Error(EndLoc, "invalid register in register list");
2724      // Ranges must go from low to high.
2725      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2726        return Error(EndLoc, "bad range in register list");
2727
2728      // Add all the registers in the range to the register list.
2729      while (Reg != EndReg) {
2730        Reg = getNextRegister(Reg);
2731        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2732      }
2733      continue;
2734    }
2735    Parser.Lex(); // Eat the comma.
2736    RegLoc = Parser.getTok().getLoc();
2737    int OldReg = Reg;
2738    const AsmToken RegTok = Parser.getTok();
2739    Reg = tryParseRegister();
2740    if (Reg == -1)
2741      return Error(RegLoc, "register expected");
2742    // Allow Q regs and just interpret them as the two D sub-registers.
2743    bool isQReg = false;
2744    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2745      Reg = getDRegFromQReg(Reg);
2746      isQReg = true;
2747    }
2748    // The register must be in the same register class as the first.
2749    if (!RC->contains(Reg))
2750      return Error(RegLoc, "invalid register in register list");
2751    // List must be monotonically increasing.
2752    if (getARMRegisterNumbering(Reg) < getARMRegisterNumbering(OldReg))
2753      return Error(RegLoc, "register list not in ascending order");
2754    if (getARMRegisterNumbering(Reg) == getARMRegisterNumbering(OldReg)) {
2755      Warning(RegLoc, "duplicated register (" + RegTok.getString() +
2756              ") in register list");
2757      continue;
2758    }
2759    // VFP register lists must also be contiguous.
2760    // It's OK to use the enumeration values directly here, as the VFP
2761    // register classes have the enum sorted properly.
2762    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2763        Reg != OldReg + 1)
2764      return Error(RegLoc, "non-contiguous register range");
2765    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2766    if (isQReg)
2767      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2768  }
2769
2770  SMLoc E = Parser.getTok().getLoc();
2771  if (Parser.getTok().isNot(AsmToken::RCurly))
2772    return Error(E, "'}' expected");
2773  Parser.Lex(); // Eat '}' token.
2774
2775  // Push the register list operand.
2776  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2777
2778  // The ARM system instruction variants for LDM/STM have a '^' token here.
2779  if (Parser.getTok().is(AsmToken::Caret)) {
2780    Operands.push_back(ARMOperand::CreateToken("^",Parser.getTok().getLoc()));
2781    Parser.Lex(); // Eat '^' token.
2782  }
2783
2784  return false;
2785}
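// A rough sketch of what this accepts: "{r0, r4-r6, lr}" yields the list
// r0, r4, r5, r6, lr (CreateRegList sorts it), "{q0, q1}" expands to d0-d3,
// and a trailing '^' as in "{r0, r2}^" adds an extra token operand for the
// LDM/STM system variants.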
2786
2787// Helper function to parse the lane index for vector lists.
2788ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2789parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2790  Index = 0; // Always return a defined index value.
2791  if (Parser.getTok().is(AsmToken::LBrac)) {
2792    Parser.Lex(); // Eat the '['.
2793    if (Parser.getTok().is(AsmToken::RBrac)) {
2794      // "Dn[]" is the 'all lanes' syntax.
2795      LaneKind = AllLanes;
2796      Parser.Lex(); // Eat the ']'.
2797      return MatchOperand_Success;
2798    }
2799    if (Parser.getTok().is(AsmToken::Integer)) {
2800      int64_t Val = Parser.getTok().getIntVal();
2801      // FIXME: Make this range check context sensitive for .8, .16, .32.
2802      if (Val < 0 || Val > 7)
2803        Error(Parser.getTok().getLoc(), "lane index out of range");
2804      Index = Val;
2805      LaneKind = IndexedLane;
2806      Parser.Lex(); // Eat the token.
2807      if (Parser.getTok().isNot(AsmToken::RBrac))
2808        Error(Parser.getTok().getLoc(), "']' expected");
2809      Parser.Lex(); // Eat the ']'.
2810      return MatchOperand_Success;
2811    }
2812    Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2813    return MatchOperand_ParseFail;
2814  }
2815  LaneKind = NoLanes;
2816  return MatchOperand_Success;
2817}
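// Sketch of the three outcomes: "d3[]" (empty brackets) selects AllLanes,
// "d3[2]" gives IndexedLane with Index = 2, and a bare "d3" with no '['
// reports NoLanes; the caller has already consumed the register itself.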
2818
2819// parse a vector register list
2820ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2821parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2822  VectorLaneTy LaneKind;
2823  unsigned LaneIndex;
2824  SMLoc S = Parser.getTok().getLoc();
2825  // As an extension (to match gas), support a plain D register or Q register
2826  // (without enclosing curly braces) as a single or double entry list,
2827  // respectively.
2828  if (Parser.getTok().is(AsmToken::Identifier)) {
2829    int Reg = tryParseRegister();
2830    if (Reg == -1)
2831      return MatchOperand_NoMatch;
2832    SMLoc E = Parser.getTok().getLoc();
2833    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2834      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2835      if (Res != MatchOperand_Success)
2836        return Res;
2837      switch (LaneKind) {
2838      default:
2839        assert(0 && "unexpected lane kind!");
2840      case NoLanes:
2841        E = Parser.getTok().getLoc();
2842        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, false, S, E));
2843        break;
2844      case AllLanes:
2845        E = Parser.getTok().getLoc();
2846        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
2847        break;
2848      case IndexedLane:
2849        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2850                                                               LaneIndex, S,E));
2851        break;
2852      }
2853      return MatchOperand_Success;
2854    }
2855    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2856      Reg = getDRegFromQReg(Reg);
2857      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2858      if (Res != MatchOperand_Success)
2859        return Res;
2860      switch (LaneKind) {
2861      default:
2862        assert(0 && "unexpected lane kind!");
2863      case NoLanes:
2864        E = Parser.getTok().getLoc();
2865        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, false, S, E));
2866        break;
2867      case AllLanes:
2868        E = Parser.getTok().getLoc();
2869        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
2870        break;
2871      case IndexedLane:
2872        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2873                                                               LaneIndex, S,E));
2874        break;
2875      }
2876      return MatchOperand_Success;
2877    }
2878    Error(S, "vector register expected");
2879    return MatchOperand_ParseFail;
2880  }
2881
2882  if (Parser.getTok().isNot(AsmToken::LCurly))
2883    return MatchOperand_NoMatch;
2884
2885  Parser.Lex(); // Eat '{' token.
2886  SMLoc RegLoc = Parser.getTok().getLoc();
2887
2888  int Reg = tryParseRegister();
2889  if (Reg == -1) {
2890    Error(RegLoc, "register expected");
2891    return MatchOperand_ParseFail;
2892  }
2893  unsigned Count = 1;
2894  int Spacing = 0;
2895  unsigned FirstReg = Reg;
2896  // The list is of D registers, but we also allow Q regs and just interpret
2897  // them as the two D sub-registers.
2898  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2899    FirstReg = Reg = getDRegFromQReg(Reg);
2900    Spacing = 1; // double-spacing requires explicit D registers, otherwise
2901                 // it's ambiguous with four-register single spaced.
2902    ++Reg;
2903    ++Count;
2904  }
2905  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2906    return MatchOperand_ParseFail;
2907
2908  while (Parser.getTok().is(AsmToken::Comma) ||
2909         Parser.getTok().is(AsmToken::Minus)) {
2910    if (Parser.getTok().is(AsmToken::Minus)) {
2911      if (!Spacing)
2912        Spacing = 1; // Register range implies a single spaced list.
2913      else if (Spacing == 2) {
2914        Error(Parser.getTok().getLoc(),
2915              "sequential registers in double spaced list");
2916        return MatchOperand_ParseFail;
2917      }
2918      Parser.Lex(); // Eat the minus.
2919      SMLoc EndLoc = Parser.getTok().getLoc();
2920      int EndReg = tryParseRegister();
2921      if (EndReg == -1) {
2922        Error(EndLoc, "register expected");
2923        return MatchOperand_ParseFail;
2924      }
2925      // Allow Q regs and just interpret them as the two D sub-registers.
2926      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2927        EndReg = getDRegFromQReg(EndReg) + 1;
2928      // If the register is the same as the start reg, there's nothing
2929      // more to do.
2930      if (Reg == EndReg)
2931        continue;
2932      // The register must be in the same register class as the first.
2933      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2934        Error(EndLoc, "invalid register in register list");
2935        return MatchOperand_ParseFail;
2936      }
2937      // Ranges must go from low to high.
2938      if (Reg > EndReg) {
2939        Error(EndLoc, "bad range in register list");
2940        return MatchOperand_ParseFail;
2941      }
2942      // Parse the lane specifier if present.
2943      VectorLaneTy NextLaneKind;
2944      unsigned NextLaneIndex;
2945      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2946        return MatchOperand_ParseFail;
2947      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2948        Error(EndLoc, "mismatched lane index in register list");
2949        return MatchOperand_ParseFail;
2950      }
2951      EndLoc = Parser.getTok().getLoc();
2952
2953      // Add all the registers in the range to the register list.
2954      Count += EndReg - Reg;
2955      Reg = EndReg;
2956      continue;
2957    }
2958    Parser.Lex(); // Eat the comma.
2959    RegLoc = Parser.getTok().getLoc();
2960    int OldReg = Reg;
2961    Reg = tryParseRegister();
2962    if (Reg == -1) {
2963      Error(RegLoc, "register expected");
2964      return MatchOperand_ParseFail;
2965    }
2966    // Vector register lists must be contiguous.
2967    // It's OK to use the enumeration values directly here, as the VFP
2968    // register classes have the enum sorted properly.
2969    //
2970    // The list is of D registers, but we also allow Q regs and just interpret
2971    // them as the two D sub-registers.
2972    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2973      if (!Spacing)
2974        Spacing = 1; // Register range implies a single spaced list.
2975      else if (Spacing == 2) {
2976        Error(RegLoc,
2977              "invalid register in double-spaced list (must be 'D' register)");
2978        return MatchOperand_ParseFail;
2979      }
2980      Reg = getDRegFromQReg(Reg);
2981      if (Reg != OldReg + 1) {
2982        Error(RegLoc, "non-contiguous register range");
2983        return MatchOperand_ParseFail;
2984      }
2985      ++Reg;
2986      Count += 2;
2987      // Parse the lane specifier if present.
2988      VectorLaneTy NextLaneKind;
2989      unsigned NextLaneIndex;
2990      SMLoc EndLoc = Parser.getTok().getLoc();
2991      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2992        return MatchOperand_ParseFail;
2993      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2994        Error(EndLoc, "mismatched lane index in register list");
2995        return MatchOperand_ParseFail;
2996      }
2997      continue;
2998    }
2999    // Normal D register.
3000    // Figure out the register spacing (single or double) of the list if
3001    // we don't know it already.
3002    if (!Spacing)
3003      Spacing = 1 + (Reg == OldReg + 2);
3004
3005    // Just check that it's contiguous and keep going.
3006    if (Reg != OldReg + Spacing) {
3007      Error(RegLoc, "non-contiguous register range");
3008      return MatchOperand_ParseFail;
3009    }
3010    ++Count;
3011    // Parse the lane specifier if present.
3012    VectorLaneTy NextLaneKind;
3013    unsigned NextLaneIndex;
3014    SMLoc EndLoc = Parser.getTok().getLoc();
3015    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
3016      return MatchOperand_ParseFail;
3017    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
3018      Error(EndLoc, "mismatched lane index in register list");
3019      return MatchOperand_ParseFail;
3020    }
3021    if (Spacing == 2 && LaneKind != NoLanes) {
3022      Error(EndLoc,
3023            "lane index specifier invalid in double spaced register list");
3024      return MatchOperand_ParseFail;
3025    }
3026  }
3027
3028  SMLoc E = Parser.getTok().getLoc();
3029  if (Parser.getTok().isNot(AsmToken::RCurly)) {
3030    Error(E, "'}' expected");
3031    return MatchOperand_ParseFail;
3032  }
3033  Parser.Lex(); // Eat '}' token.
3034
3035  switch (LaneKind) {
3036  default:
3037    assert(0 && "unexpected lane kind in register list.");
3038  case NoLanes:
3039    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count,
3040                                                    (Spacing == 2), S, E));
3041    break;
3042  case AllLanes:
3043    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
3044                                                            S, E));
3045    break;
3046  case IndexedLane:
3047    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
3048                                                           LaneIndex, S, E));
3049    break;
3050  }
3051  return MatchOperand_Success;
3052}
3053
3054/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
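/// For example, the "ish" in "dmb ish" or the "sy" in "dsb sy"; the accepted
/// option names are exactly those in the StringSwitch below.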
3055ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3056parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3057  SMLoc S = Parser.getTok().getLoc();
3058  const AsmToken &Tok = Parser.getTok();
3059  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3060  StringRef OptStr = Tok.getString();
3061
3062  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
3063    .Case("sy",    ARM_MB::SY)
3064    .Case("st",    ARM_MB::ST)
3065    .Case("sh",    ARM_MB::ISH)
3066    .Case("ish",   ARM_MB::ISH)
3067    .Case("shst",  ARM_MB::ISHST)
3068    .Case("ishst", ARM_MB::ISHST)
3069    .Case("nsh",   ARM_MB::NSH)
3070    .Case("un",    ARM_MB::NSH)
3071    .Case("nshst", ARM_MB::NSHST)
3072    .Case("unst",  ARM_MB::NSHST)
3073    .Case("osh",   ARM_MB::OSH)
3074    .Case("oshst", ARM_MB::OSHST)
3075    .Default(~0U);
3076
3077  if (Opt == ~0U)
3078    return MatchOperand_NoMatch;
3079
3080  Parser.Lex(); // Eat identifier token.
3081  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
3082  return MatchOperand_Success;
3083}
3084
3085/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
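/// The operand is a string of 'a'/'i'/'f' letters (or "none"), e.g. the "if"
/// in "cpsid if" or the "aif" in "cpsie aif".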
3086ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3087parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3088  SMLoc S = Parser.getTok().getLoc();
3089  const AsmToken &Tok = Parser.getTok();
3090  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3091  StringRef IFlagsStr = Tok.getString();
3092
3093  // An iflags string of "none" is interpreted to mean that none of the AIF
3094  // bits are set.  Not a terribly useful instruction, but a valid encoding.
3095  unsigned IFlags = 0;
3096  if (IFlagsStr != "none") {
3097    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
3098      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
3099        .Case("a", ARM_PROC::A)
3100        .Case("i", ARM_PROC::I)
3101        .Case("f", ARM_PROC::F)
3102        .Default(~0U);
3103
3104      // If some specific iflag is already set, it means that some letter is
3105      // present more than once, which is not acceptable.
3106      if (Flag == ~0U || (IFlags & Flag))
3107        return MatchOperand_NoMatch;
3108
3109      IFlags |= Flag;
3110    }
3111  }
3112
3113  Parser.Lex(); // Eat identifier token.
3114  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
3115  return MatchOperand_Success;
3116}
3117
3118/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
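/// For example: "apsr_nzcvq" or "cpsr_fc" as in "msr cpsr_fc, r0", or an
/// M-class special register name such as "primask" or "basepri" (see the
/// table below).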
3119ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3120parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3121  SMLoc S = Parser.getTok().getLoc();
3122  const AsmToken &Tok = Parser.getTok();
3123  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
3124  StringRef Mask = Tok.getString();
3125
3126  if (isMClass()) {
3127    // See ARMv6-M 10.1.1
3128    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
3129      .Case("apsr", 0)
3130      .Case("iapsr", 1)
3131      .Case("eapsr", 2)
3132      .Case("xpsr", 3)
3133      .Case("ipsr", 5)
3134      .Case("epsr", 6)
3135      .Case("iepsr", 7)
3136      .Case("msp", 8)
3137      .Case("psp", 9)
3138      .Case("primask", 16)
3139      .Case("basepri", 17)
3140      .Case("basepri_max", 18)
3141      .Case("faultmask", 19)
3142      .Case("control", 20)
3143      .Default(~0U);
3144
3145    if (FlagsVal == ~0U)
3146      return MatchOperand_NoMatch;
3147
3148    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
3149      // basepri, basepri_max and faultmask are only valid for ARMv7-M.
3150      return MatchOperand_NoMatch;
3151
3152    Parser.Lex(); // Eat identifier token.
3153    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3154    return MatchOperand_Success;
3155  }
3156
3157  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
3158  size_t Start = 0, Next = Mask.find('_');
3159  StringRef Flags = "";
3160  std::string SpecReg = Mask.slice(Start, Next).lower();
3161  if (Next != StringRef::npos)
3162    Flags = Mask.slice(Next+1, Mask.size());
3163
3164  // FlagsVal contains the complete mask:
3165  // 3-0: Mask
3166  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3167  unsigned FlagsVal = 0;
3168
3169  if (SpecReg == "apsr") {
3170    FlagsVal = StringSwitch<unsigned>(Flags)
3171    .Case("nzcvq",  0x8) // same as CPSR_f
3172    .Case("g",      0x4) // same as CPSR_s
3173    .Case("nzcvqg", 0xc) // same as CPSR_fs
3174    .Default(~0U);
3175
3176    if (FlagsVal == ~0U) {
3177      if (!Flags.empty())
3178        return MatchOperand_NoMatch;
3179      else
3180        FlagsVal = 8; // No flag
3181    }
3182  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3183    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3184      Flags = "fc";
3185    for (int i = 0, e = Flags.size(); i != e; ++i) {
3186      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3187      .Case("c", 1)
3188      .Case("x", 2)
3189      .Case("s", 4)
3190      .Case("f", 8)
3191      .Default(~0U);
3192
3193      // If some specific flag is already set, it means that some letter is
3194      // present more than once, which is not acceptable.
3195      if (Flag == ~0U || (FlagsVal & Flag))
3196        return MatchOperand_NoMatch;
3197      FlagsVal |= Flag;
3198    }
3199  } else // No match for special register.
3200    return MatchOperand_NoMatch;
3201
3202  // Special register without flags is NOT equivalent to "fc" flags.
3203  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3204  // two lines would enable gas compatibility at the expense of breaking
3205  // round-tripping.
3206  //
3207  // if (!FlagsVal)
3208  //  FlagsVal = 0x9;
3209
3210  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3211  if (SpecReg == "spsr")
3212    FlagsVal |= 16;
3213
3214  Parser.Lex(); // Eat identifier token.
3215  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3216  return MatchOperand_Success;
3217}
3218
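/// parsePKHImm - Parse the shift-immediate operand of the PKHBT/PKHTB
/// instructions, e.g. the "lsl #8" in "pkhbt r0, r1, r2, lsl #8"; the shift
/// operator name and the legal range are supplied by the caller.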
3219ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3220parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3221            int Low, int High) {
3222  const AsmToken &Tok = Parser.getTok();
3223  if (Tok.isNot(AsmToken::Identifier)) {
3224    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3225    return MatchOperand_ParseFail;
3226  }
3227  StringRef ShiftName = Tok.getString();
3228  std::string LowerOp = Op.lower();
3229  std::string UpperOp = Op.upper();
3230  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3231    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3232    return MatchOperand_ParseFail;
3233  }
3234  Parser.Lex(); // Eat shift type token.
3235
3236  // There must be a '#' and a shift amount.
3237  if (Parser.getTok().isNot(AsmToken::Hash) &&
3238      Parser.getTok().isNot(AsmToken::Dollar)) {
3239    Error(Parser.getTok().getLoc(), "'#' expected");
3240    return MatchOperand_ParseFail;
3241  }
3242  Parser.Lex(); // Eat hash token.
3243
3244  const MCExpr *ShiftAmount;
3245  SMLoc Loc = Parser.getTok().getLoc();
3246  if (getParser().ParseExpression(ShiftAmount)) {
3247    Error(Loc, "illegal expression");
3248    return MatchOperand_ParseFail;
3249  }
3250  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3251  if (!CE) {
3252    Error(Loc, "constant expression expected");
3253    return MatchOperand_ParseFail;
3254  }
3255  int Val = CE->getValue();
3256  if (Val < Low || Val > High) {
3257    Error(Loc, "immediate value out of range");
3258    return MatchOperand_ParseFail;
3259  }
3260
3261  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3262
3263  return MatchOperand_Success;
3264}
3265
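/// parseSetEndImm - Parse the endianness operand of SETEND, e.g. the "be" in
/// "setend be", producing the immediate the encoding expects (1 for
/// big-endian, 0 for little-endian).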
3266ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3267parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3268  const AsmToken &Tok = Parser.getTok();
3269  SMLoc S = Tok.getLoc();
3270  if (Tok.isNot(AsmToken::Identifier)) {
3271    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3272    return MatchOperand_ParseFail;
3273  }
3274  int Val = StringSwitch<int>(Tok.getString())
3275    .Case("be", 1)
3276    .Case("le", 0)
3277    .Default(-1);
3278  Parser.Lex(); // Eat the token.
3279
3280  if (Val == -1) {
3281    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3282    return MatchOperand_ParseFail;
3283  }
3284  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3285                                                                  getContext()),
3286                                           S, Parser.getTok().getLoc()));
3287  return MatchOperand_Success;
3288}
3289
3290/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3291/// instructions. Legal values are:
3292///     lsl #n  'n' in [0,31]
3293///     asr #n  'n' in [1,32]
3294///             n == 32 encoded as n == 0.
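///     For example, the "lsl #4" in "ssat r0, #8, r1, lsl #4" or the
///     "asr #16" in "usat r0, #7, r1, asr #16".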
3295ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3296parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3297  const AsmToken &Tok = Parser.getTok();
3298  SMLoc S = Tok.getLoc();
3299  if (Tok.isNot(AsmToken::Identifier)) {
3300    Error(S, "shift operator 'asr' or 'lsl' expected");
3301    return MatchOperand_ParseFail;
3302  }
3303  StringRef ShiftName = Tok.getString();
3304  bool isASR;
3305  if (ShiftName == "lsl" || ShiftName == "LSL")
3306    isASR = false;
3307  else if (ShiftName == "asr" || ShiftName == "ASR")
3308    isASR = true;
3309  else {
3310    Error(S, "shift operator 'asr' or 'lsl' expected");
3311    return MatchOperand_ParseFail;
3312  }
3313  Parser.Lex(); // Eat the operator.
3314
3315  // A '#' and a shift amount.
3316  if (Parser.getTok().isNot(AsmToken::Hash) &&
3317      Parser.getTok().isNot(AsmToken::Dollar)) {
3318    Error(Parser.getTok().getLoc(), "'#' expected");
3319    return MatchOperand_ParseFail;
3320  }
3321  Parser.Lex(); // Eat hash token.
3322
3323  const MCExpr *ShiftAmount;
3324  SMLoc E = Parser.getTok().getLoc();
3325  if (getParser().ParseExpression(ShiftAmount)) {
3326    Error(E, "malformed shift expression");
3327    return MatchOperand_ParseFail;
3328  }
3329  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3330  if (!CE) {
3331    Error(E, "shift amount must be an immediate");
3332    return MatchOperand_ParseFail;
3333  }
3334
3335  int64_t Val = CE->getValue();
3336  if (isASR) {
3337    // Shift amount must be in [1,32]
3338    if (Val < 1 || Val > 32) {
3339      Error(E, "'asr' shift amount must be in range [1,32]");
3340      return MatchOperand_ParseFail;
3341    }
3342    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3343    if (isThumb() && Val == 32) {
3344      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3345      return MatchOperand_ParseFail;
3346    }
3347    if (Val == 32) Val = 0;
3348  } else {
3349    // Shift amount must be in [0,31]
3350    if (Val < 0 || Val > 31) {
3351      Error(E, "'lsl' shift amount must be in range [0,31]");
3352      return MatchOperand_ParseFail;
3353    }
3354  }
3355
3356  E = Parser.getTok().getLoc();
3357  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3358
3359  return MatchOperand_Success;
3360}
3361
3362/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3363/// of instructions. Legal values are:
3364///     ror #n  'n' in {0, 8, 16, 24}
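///     For example, the "ror #8" in "sxtb r0, r1, ror #8" or the "ror #16"
///     in "uxtah r0, r1, r2, ror #16".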
3365ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3366parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3367  const AsmToken &Tok = Parser.getTok();
3368  SMLoc S = Tok.getLoc();
3369  if (Tok.isNot(AsmToken::Identifier))
3370    return MatchOperand_NoMatch;
3371  StringRef ShiftName = Tok.getString();
3372  if (ShiftName != "ror" && ShiftName != "ROR")
3373    return MatchOperand_NoMatch;
3374  Parser.Lex(); // Eat the operator.
3375
3376  // A '#' and a rotate amount.
3377  if (Parser.getTok().isNot(AsmToken::Hash) &&
3378      Parser.getTok().isNot(AsmToken::Dollar)) {
3379    Error(Parser.getTok().getLoc(), "'#' expected");
3380    return MatchOperand_ParseFail;
3381  }
3382  Parser.Lex(); // Eat hash token.
3383
3384  const MCExpr *ShiftAmount;
3385  SMLoc E = Parser.getTok().getLoc();
3386  if (getParser().ParseExpression(ShiftAmount)) {
3387    Error(E, "malformed rotate expression");
3388    return MatchOperand_ParseFail;
3389  }
3390  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3391  if (!CE) {
3392    Error(E, "rotate amount must be an immediate");
3393    return MatchOperand_ParseFail;
3394  }
3395
3396  int64_t Val = CE->getValue();
3397  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension;
3398  // normally, zero is represented in asm by omitting the rotate operand
3399  // entirely).
3400  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3401    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3402    return MatchOperand_ParseFail;
3403  }
3404
3405  E = Parser.getTok().getLoc();
3406  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3407
3408  return MatchOperand_Success;
3409}
3410
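/// parseBitfield - Parse the "#lsb, #width" operand pair used by bitfield
/// instructions such as BFI and BFC, e.g. the "#8, #4" in
/// "bfi r0, r1, #8, #4". The pair is folded into a single bitfield operand.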
3411ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3412parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3413  SMLoc S = Parser.getTok().getLoc();
3414  // The bitfield descriptor is really two operands, the LSB and the width.
3415  if (Parser.getTok().isNot(AsmToken::Hash) &&
3416      Parser.getTok().isNot(AsmToken::Dollar)) {
3417    Error(Parser.getTok().getLoc(), "'#' expected");
3418    return MatchOperand_ParseFail;
3419  }
3420  Parser.Lex(); // Eat hash token.
3421
3422  const MCExpr *LSBExpr;
3423  SMLoc E = Parser.getTok().getLoc();
3424  if (getParser().ParseExpression(LSBExpr)) {
3425    Error(E, "malformed immediate expression");
3426    return MatchOperand_ParseFail;
3427  }
3428  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3429  if (!CE) {
3430    Error(E, "'lsb' operand must be an immediate");
3431    return MatchOperand_ParseFail;
3432  }
3433
3434  int64_t LSB = CE->getValue();
3435  // The LSB must be in the range [0,31]
3436  if (LSB < 0 || LSB > 31) {
3437    Error(E, "'lsb' operand must be in the range [0,31]");
3438    return MatchOperand_ParseFail;
3439  }
3440  E = Parser.getTok().getLoc();
3441
3442  // Expect another immediate operand.
3443  if (Parser.getTok().isNot(AsmToken::Comma)) {
3444    Error(Parser.getTok().getLoc(), "too few operands");
3445    return MatchOperand_ParseFail;
3446  }
3447  Parser.Lex(); // Eat the comma.
3448  if (Parser.getTok().isNot(AsmToken::Hash) &&
3449      Parser.getTok().isNot(AsmToken::Dollar)) {
3450    Error(Parser.getTok().getLoc(), "'#' expected");
3451    return MatchOperand_ParseFail;
3452  }
3453  Parser.Lex(); // Eat hash token.
3454
3455  const MCExpr *WidthExpr;
3456  if (getParser().ParseExpression(WidthExpr)) {
3457    Error(E, "malformed immediate expression");
3458    return MatchOperand_ParseFail;
3459  }
3460  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3461  if (!CE) {
3462    Error(E, "'width' operand must be an immediate");
3463    return MatchOperand_ParseFail;
3464  }
3465
3466  int64_t Width = CE->getValue();
3467  // The width must be in the range [1,32-lsb]
3468  if (Width < 1 || Width > 32 - LSB) {
3469    Error(E, "'width' operand must be in the range [1,32-lsb]");
3470    return MatchOperand_ParseFail;
3471  }
3472  E = Parser.getTok().getLoc();
3473
3474  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3475
3476  return MatchOperand_Success;
3477}
3478
3479ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3480parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3481  // Check for a post-index addressing register operand. Specifically:
3482  // postidx_reg := '+' register {, shift}
3483  //              | '-' register {, shift}
3484  //              | register {, shift}
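  // For example, the "r2" in "ldr r0, [r1], r2" or the "-r2, lsl #2" in
  // "ldr r0, [r1], -r2, lsl #2".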
3485
3486  // This method must return MatchOperand_NoMatch without consuming any tokens
3487  // in the case where there is no match, as other alternatives take other
3488  // parse methods.
3489  AsmToken Tok = Parser.getTok();
3490  SMLoc S = Tok.getLoc();
3491  bool haveEaten = false;
3492  bool isAdd = true;
3493  int Reg = -1;
3494  if (Tok.is(AsmToken::Plus)) {
3495    Parser.Lex(); // Eat the '+' token.
3496    haveEaten = true;
3497  } else if (Tok.is(AsmToken::Minus)) {
3498    Parser.Lex(); // Eat the '-' token.
3499    isAdd = false;
3500    haveEaten = true;
3501  }
3502  if (Parser.getTok().is(AsmToken::Identifier))
3503    Reg = tryParseRegister();
3504  if (Reg == -1) {
3505    if (!haveEaten)
3506      return MatchOperand_NoMatch;
3507    Error(Parser.getTok().getLoc(), "register expected");
3508    return MatchOperand_ParseFail;
3509  }
3510  SMLoc E = Parser.getTok().getLoc();
3511
3512  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3513  unsigned ShiftImm = 0;
3514  if (Parser.getTok().is(AsmToken::Comma)) {
3515    Parser.Lex(); // Eat the ','.
3516    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3517      return MatchOperand_ParseFail;
3518  }
3519
3520  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3521                                                  ShiftImm, S, E));
3522
3523  return MatchOperand_Success;
3524}
3525
3526ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3527parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3528  // Check for a post-index addressing register operand. Specifically:
3529  // am3offset := '+' register
3530  //              | '-' register
3531  //              | register
3532  //              | # imm
3533  //              | # + imm
3534  //              | # - imm
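  // For example, the "#8" in "ldrd r0, r1, [r2], #8" or the "-r3" in
  // "strh r0, [r1], -r3".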
3535
3536  // This method must return MatchOperand_NoMatch without consuming any tokens
3537  // in the case where there is no match, as other alternatives take other
3538  // parse methods.
3539  AsmToken Tok = Parser.getTok();
3540  SMLoc S = Tok.getLoc();
3541
3542  // Do immediates first, as we always parse those if we have a '#'.
3543  if (Parser.getTok().is(AsmToken::Hash) ||
3544      Parser.getTok().is(AsmToken::Dollar)) {
3545    Parser.Lex(); // Eat the '#'.
3546    // Explicitly look for a '-', as we need to encode negative zero
3547    // differently.
3548    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3549    const MCExpr *Offset;
3550    if (getParser().ParseExpression(Offset))
3551      return MatchOperand_ParseFail;
3552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3553    if (!CE) {
3554      Error(S, "constant expression expected");
3555      return MatchOperand_ParseFail;
3556    }
3557    SMLoc E = Tok.getLoc();
3558    // Negative zero is encoded as the flag value INT32_MIN.
3559    int32_t Val = CE->getValue();
3560    if (isNegative && Val == 0)
3561      Val = INT32_MIN;
3562
3563    Operands.push_back(
3564      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3565
3566    return MatchOperand_Success;
3567  }
3568
3569
3570  bool haveEaten = false;
3571  bool isAdd = true;
3572  int Reg = -1;
3573  if (Tok.is(AsmToken::Plus)) {
3574    Parser.Lex(); // Eat the '+' token.
3575    haveEaten = true;
3576  } else if (Tok.is(AsmToken::Minus)) {
3577    Parser.Lex(); // Eat the '-' token.
3578    isAdd = false;
3579    haveEaten = true;
3580  }
3581  if (Parser.getTok().is(AsmToken::Identifier))
3582    Reg = tryParseRegister();
3583  if (Reg == -1) {
3584    if (!haveEaten)
3585      return MatchOperand_NoMatch;
3586    Error(Parser.getTok().getLoc(), "register expected");
3587    return MatchOperand_ParseFail;
3588  }
3589  SMLoc E = Parser.getTok().getLoc();
3590
3591  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3592                                                  0, S, E));
3593
3594  return MatchOperand_Success;
3595}
3596
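/// The cvt* helpers below all follow the same basic pattern: push the parsed
/// operands in the order the MCInst expects and insert a dummy placeholder for
/// the writeback result, since the matcher can't synthesize that tied operand
/// itself.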
3597/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3598/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3599/// when they refer to multiple MIOperands inside a single one.
3600bool ARMAsmParser::
3601cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3602             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3603  // Rt, Rt2
3604  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3605  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3606  // Create a writeback register dummy placeholder.
3607  Inst.addOperand(MCOperand::CreateReg(0));
3608  // addr
3609  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3610  // pred
3611  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3612  return true;
3613}
3614
3615/// cvtT2StrdPre - Convert parsed operands to MCInst.
3616/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3617/// when they refer to multiple MIOperands inside a single one.
3618bool ARMAsmParser::
3619cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3620             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3621  // Create a writeback register dummy placeholder.
3622  Inst.addOperand(MCOperand::CreateReg(0));
3623  // Rt, Rt2
3624  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3625  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3626  // addr
3627  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3628  // pred
3629  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3630  return true;
3631}
3632
3633/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3634/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3635/// when they refer to multiple MIOperands inside a single one.
3636bool ARMAsmParser::
3637cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3638                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3639  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3640
3641  // Create a writeback register dummy placeholder.
3642  Inst.addOperand(MCOperand::CreateImm(0));
3643
3644  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3645  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3646  return true;
3647}
3648
3649/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3650/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3651/// when they refer to multiple MIOperands inside a single one.
3652bool ARMAsmParser::
3653cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3654                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3655  // Create a writeback register dummy placeholder.
3656  Inst.addOperand(MCOperand::CreateImm(0));
3657  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3658  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3659  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3660  return true;
3661}
3662
3663/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3664/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3665/// when they refer to multiple MIOperands inside a single one.
3666bool ARMAsmParser::
3667cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3668                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3669  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3670
3671  // Create a writeback register dummy placeholder.
3672  Inst.addOperand(MCOperand::CreateImm(0));
3673
3674  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3675  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3676  return true;
3677}
3678
3679/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3680/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3681/// when they refer to multiple MIOperands inside a single one.
3682bool ARMAsmParser::
3683cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3684                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3685  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3686
3687  // Create a writeback register dummy placeholder.
3688  Inst.addOperand(MCOperand::CreateImm(0));
3689
3690  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3691  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3692  return true;
3693}
3694
3695
3696/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3697/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3698/// when they refer to multiple MIOperands inside a single one.
3699bool ARMAsmParser::
3700cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3701                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3702  // Create a writeback register dummy placeholder.
3703  Inst.addOperand(MCOperand::CreateImm(0));
3704  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3705  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3706  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3707  return true;
3708}
3709
3710/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3711/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3712/// when they refer to multiple MIOperands inside a single one.
3713bool ARMAsmParser::
3714cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3715                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3716  // Create a writeback register dummy placeholder.
3717  Inst.addOperand(MCOperand::CreateImm(0));
3718  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3719  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3720  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3721  return true;
3722}
3723
3724/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3725/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3726/// when they refer to multiple MIOperands inside a single one.
3727bool ARMAsmParser::
3728cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3729                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3730  // Create a writeback register dummy placeholder.
3731  Inst.addOperand(MCOperand::CreateImm(0));
3732  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3733  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3734  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3735  return true;
3736}
3737
3738/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3739/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3740/// when they refer to multiple MIOperands inside a single one.
3741bool ARMAsmParser::
3742cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3743                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3744  // Rt
3745  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3746  // Create a writeback register dummy placeholder.
3747  Inst.addOperand(MCOperand::CreateImm(0));
3748  // addr
3749  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3750  // offset
3751  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3752  // pred
3753  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3754  return true;
3755}
3756
3757/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3758/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3759/// when they refer to multiple MIOperands inside a single one.
3760bool ARMAsmParser::
3761cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3762                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3763  // Rt
3764  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3765  // Create a writeback register dummy placeholder.
3766  Inst.addOperand(MCOperand::CreateImm(0));
3767  // addr
3768  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3769  // offset
3770  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3771  // pred
3772  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3773  return true;
3774}
3775
3776/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3777/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3778/// when they refer to multiple MIOperands inside a single one.
3779bool ARMAsmParser::
3780cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3781                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3782  // Create a writeback register dummy placeholder.
3783  Inst.addOperand(MCOperand::CreateImm(0));
3784  // Rt
3785  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3786  // addr
3787  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3788  // offset
3789  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3790  // pred
3791  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3792  return true;
3793}
3794
3795/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3796/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3797/// when they refer to multiple MIOperands inside a single one.
3798bool ARMAsmParser::
3799cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3800                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3801  // Create a writeback register dummy placeholder.
3802  Inst.addOperand(MCOperand::CreateImm(0));
3803  // Rt
3804  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3805  // addr
3806  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3807  // offset
3808  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3809  // pred
3810  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3811  return true;
3812}
3813
3814/// cvtLdrdPre - Convert parsed operands to MCInst.
3815/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3816/// when they refer to multiple MIOperands inside a single one.
3817bool ARMAsmParser::
3818cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3819           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3820  // Rt, Rt2
3821  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3822  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3823  // Create a writeback register dummy placeholder.
3824  Inst.addOperand(MCOperand::CreateImm(0));
3825  // addr
3826  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3827  // pred
3828  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3829  return true;
3830}
3831
3832/// cvtStrdPre - Convert parsed operands to MCInst.
3833/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3834/// when they refer to multiple MIOperands inside a single one.
3835bool ARMAsmParser::
3836cvtStrdPre(MCInst &Inst, unsigned Opcode,
3837           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3838  // Create a writeback register dummy placeholder.
3839  Inst.addOperand(MCOperand::CreateImm(0));
3840  // Rt, Rt2
3841  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3842  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3843  // addr
3844  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3845  // pred
3846  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3847  return true;
3848}
3849
3850/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3851/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3852/// when they refer to multiple MIOperands inside a single one.
3853bool ARMAsmParser::
3854cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3855                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3856  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3857  // Create a writeback register dummy placeholder.
3858  Inst.addOperand(MCOperand::CreateImm(0));
3859  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3860  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3861  return true;
3862}
3863
3864/// cvtThumbMultiply - Convert parsed operands to MCInst.
3865/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3866/// when they refer to multiple MIOperands inside a single one.
3867bool ARMAsmParser::
3868cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3869           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3870  // The second source operand must be the same register as the destination
3871  // operand.
3872  if (Operands.size() == 6 &&
3873      (((ARMOperand*)Operands[3])->getReg() !=
3874       ((ARMOperand*)Operands[5])->getReg()) &&
3875      (((ARMOperand*)Operands[3])->getReg() !=
3876       ((ARMOperand*)Operands[4])->getReg())) {
3877    Error(Operands[3]->getStartLoc(),
3878          "destination register must match source register");
3879    return false;
3880  }
3881  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3882  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3883  // If we have a three-operand form, make sure to set Rn to be the operand
3884  // that isn't the same as Rd.
3885  unsigned RegOp = 4;
3886  if (Operands.size() == 6 &&
3887      ((ARMOperand*)Operands[4])->getReg() ==
3888        ((ARMOperand*)Operands[3])->getReg())
3889    RegOp = 5;
3890  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3891  Inst.addOperand(Inst.getOperand(0));
3892  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3893
3894  return true;
3895}
3896
3897bool ARMAsmParser::
3898cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3899              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3900  // Vd
3901  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3902  // Create a writeback register dummy placeholder.
3903  Inst.addOperand(MCOperand::CreateImm(0));
3904  // Vn
3905  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3906  // pred
3907  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3908  return true;
3909}
3910
3911bool ARMAsmParser::
3912cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3913                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3914  // Vd
3915  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3916  // Create a writeback register dummy placeholder.
3917  Inst.addOperand(MCOperand::CreateImm(0));
3918  // Vn
3919  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3920  // Vm
3921  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3922  // pred
3923  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3924  return true;
3925}
3926
3927bool ARMAsmParser::
3928cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3929              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3930  // Create a writeback register dummy placeholder.
3931  Inst.addOperand(MCOperand::CreateImm(0));
3932  // Vn
3933  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3934  // Vt
3935  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3936  // pred
3937  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3938  return true;
3939}
3940
3941bool ARMAsmParser::
3942cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3943                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3944  // Create a writeback register dummy placeholder.
3945  Inst.addOperand(MCOperand::CreateImm(0));
3946  // Vn
3947  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3948  // Vm
3949  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3950  // Vt
3951  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3952  // pred
3953  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3954  return true;
3955}
3956
3957/// Parse an ARM memory expression. Return false if successful, or true (after
3958/// emitting a diagnostic) on error. The first token must be a '[' when called.
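/// Examples of forms handled here: "[r0]", "[r0, #4]", "[r0, #-8]!",
/// "[r1, r2, lsl #2]", and the alignment form "[r0, :128]".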
3959bool ARMAsmParser::
3960parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3961  SMLoc S, E;
3962  assert(Parser.getTok().is(AsmToken::LBrac) &&
3963         "Token is not a Left Bracket");
3964  S = Parser.getTok().getLoc();
3965  Parser.Lex(); // Eat left bracket token.
3966
3967  const AsmToken &BaseRegTok = Parser.getTok();
3968  int BaseRegNum = tryParseRegister();
3969  if (BaseRegNum == -1)
3970    return Error(BaseRegTok.getLoc(), "register expected");
3971
3972  // The next token must either be a comma or a closing bracket.
3973  const AsmToken &Tok = Parser.getTok();
3974  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3975    return Error(Tok.getLoc(), "malformed memory operand");
3976
3977  if (Tok.is(AsmToken::RBrac)) {
3978    E = Tok.getLoc();
3979    Parser.Lex(); // Eat right bracket token.
3980
3981    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3982                                             0, 0, false, S, E));
3983
3984    // If there's a pre-indexing writeback marker, '!', just add it as a token
3985    // operand. It's rather odd, but syntactically valid.
3986    if (Parser.getTok().is(AsmToken::Exclaim)) {
3987      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3988      Parser.Lex(); // Eat the '!'.
3989    }
3990
3991    return false;
3992  }
3993
3994  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
3995  Parser.Lex(); // Eat the comma.
3996
3997  // If we have a ':', it's an alignment specifier.
3998  if (Parser.getTok().is(AsmToken::Colon)) {
3999    Parser.Lex(); // Eat the ':'.
4000    E = Parser.getTok().getLoc();
4001
4002    const MCExpr *Expr;
4003    if (getParser().ParseExpression(Expr))
4004     return true;
4005
4006    // The expression has to be a constant. Memory references with relocations
4007    // don't come through here, as they use the <label> forms of the relevant
4008    // instructions.
4009    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4010    if (!CE)
4011      return Error (E, "constant expression expected");
4012
4013    unsigned Align = 0;
4014    switch (CE->getValue()) {
4015    default:
4016      return Error(E,
4017                   "alignment specifier must be 16, 32, 64, 128, or 256 bits");
4018    case 16:  Align = 2; break;
4019    case 32:  Align = 4; break;
4020    case 64:  Align = 8; break;
4021    case 128: Align = 16; break;
4022    case 256: Align = 32; break;
4023    }
4024
4025    // Now we should have the closing ']'
4026    E = Parser.getTok().getLoc();
4027    if (Parser.getTok().isNot(AsmToken::RBrac))
4028      return Error(E, "']' expected");
4029    Parser.Lex(); // Eat right bracket token.
4030
4031    // Don't worry about range checking the value here. That's handled by
4032    // the is*() predicates.
4033    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
4034                                             ARM_AM::no_shift, 0, Align,
4035                                             false, S, E));
4036
4037    // If there's a pre-indexing writeback marker, '!', just add it as a token
4038    // operand.
4039    if (Parser.getTok().is(AsmToken::Exclaim)) {
4040      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4041      Parser.Lex(); // Eat the '!'.
4042    }
4043
4044    return false;
4045  }
4046
4047  // If we have a '#', it's an immediate offset, else assume it's a register
4048  // offset. Be friendly and also accept a plain integer (without a leading
4049  // hash) for gas compatibility.
4050  if (Parser.getTok().is(AsmToken::Hash) ||
4051      Parser.getTok().is(AsmToken::Dollar) ||
4052      Parser.getTok().is(AsmToken::Integer)) {
4053    if (Parser.getTok().isNot(AsmToken::Integer))
4054      Parser.Lex(); // Eat the '#'.
4055    E = Parser.getTok().getLoc();
4056
4057    bool isNegative = getParser().getTok().is(AsmToken::Minus);
4058    const MCExpr *Offset;
4059    if (getParser().ParseExpression(Offset))
4060     return true;
4061
4062    // The expression has to be a constant. Memory references with relocations
4063    // don't come through here, as they use the <label> forms of the relevant
4064    // instructions.
4065    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
4066    if (!CE)
4067      return Error (E, "constant expression expected");
4068
4069    // If the constant was #-0, represent it as INT32_MIN.
4070    int32_t Val = CE->getValue();
4071    if (isNegative && Val == 0)
4072      CE = MCConstantExpr::Create(INT32_MIN, getContext());
4073
4074    // Now we should have the closing ']'
4075    E = Parser.getTok().getLoc();
4076    if (Parser.getTok().isNot(AsmToken::RBrac))
4077      return Error(E, "']' expected");
4078    Parser.Lex(); // Eat right bracket token.
4079
4080    // Don't worry about range checking the value here. That's handled by
4081    // the is*() predicates.
4082    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
4083                                             ARM_AM::no_shift, 0, 0,
4084                                             false, S, E));
4085
4086    // If there's a pre-indexing writeback marker, '!', just add it as a token
4087    // operand.
4088    if (Parser.getTok().is(AsmToken::Exclaim)) {
4089      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4090      Parser.Lex(); // Eat the '!'.
4091    }
4092
4093    return false;
4094  }
4095
4096  // The register offset is optionally preceded by a '+' or '-'
4097  bool isNegative = false;
4098  if (Parser.getTok().is(AsmToken::Minus)) {
4099    isNegative = true;
4100    Parser.Lex(); // Eat the '-'.
4101  } else if (Parser.getTok().is(AsmToken::Plus)) {
4102    // Nothing to do.
4103    Parser.Lex(); // Eat the '+'.
4104  }
4105
4106  E = Parser.getTok().getLoc();
4107  int OffsetRegNum = tryParseRegister();
4108  if (OffsetRegNum == -1)
4109    return Error(E, "register expected");
4110
4111  // If there's a shift operator, handle it.
4112  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
4113  unsigned ShiftImm = 0;
4114  if (Parser.getTok().is(AsmToken::Comma)) {
4115    Parser.Lex(); // Eat the ','.
4116    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
4117      return true;
4118  }
4119
4120  // Now we should have the closing ']'
4121  E = Parser.getTok().getLoc();
4122  if (Parser.getTok().isNot(AsmToken::RBrac))
4123    return Error(E, "']' expected");
4124  Parser.Lex(); // Eat right bracket token.
4125
4126  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
4127                                           ShiftType, ShiftImm, 0, isNegative,
4128                                           S, E));
4129
4130  // If there's a pre-indexing writeback marker, '!', just add it as a token
4131  // operand.
4132  if (Parser.getTok().is(AsmToken::Exclaim)) {
4133    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
4134    Parser.Lex(); // Eat the '!'.
4135  }
4136
4137  return false;
4138}
4139
4140/// parseMemRegOffsetShift - one of these two:
4141///   ( lsl | lsr | asr | ror ) , # shift_amount
4142///   rrx
4143/// Returns false if the shift was parsed successfully, true otherwise.
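/// For example, the "lsl #2" in "[r0, r1, lsl #2]" is parsed here, after the
/// caller has consumed the comma.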
4144bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
4145                                          unsigned &Amount) {
4146  SMLoc Loc = Parser.getTok().getLoc();
4147  const AsmToken &Tok = Parser.getTok();
4148  if (Tok.isNot(AsmToken::Identifier))
4149    return true;
4150  StringRef ShiftName = Tok.getString();
4151  if (ShiftName == "lsl" || ShiftName == "LSL" ||
4152      ShiftName == "asl" || ShiftName == "ASL")
4153    St = ARM_AM::lsl;
4154  else if (ShiftName == "lsr" || ShiftName == "LSR")
4155    St = ARM_AM::lsr;
4156  else if (ShiftName == "asr" || ShiftName == "ASR")
4157    St = ARM_AM::asr;
4158  else if (ShiftName == "ror" || ShiftName == "ROR")
4159    St = ARM_AM::ror;
4160  else if (ShiftName == "rrx" || ShiftName == "RRX")
4161    St = ARM_AM::rrx;
4162  else
4163    return Error(Loc, "illegal shift operator");
4164  Parser.Lex(); // Eat shift type token.
4165
4166  // rrx stands alone.
4167  Amount = 0;
4168  if (St != ARM_AM::rrx) {
4169    Loc = Parser.getTok().getLoc();
4170    // A '#' and a shift amount.
4171    const AsmToken &HashTok = Parser.getTok();
4172    if (HashTok.isNot(AsmToken::Hash) &&
4173        HashTok.isNot(AsmToken::Dollar))
4174      return Error(HashTok.getLoc(), "'#' expected");
4175    Parser.Lex(); // Eat hash token.
4176
4177    const MCExpr *Expr;
4178    if (getParser().ParseExpression(Expr))
4179      return true;
4180    // Range check the immediate.
4181    // lsl, ror: 0 <= imm <= 31
4182    // lsr, asr: 0 <= imm <= 32
4183    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
4184    if (!CE)
4185      return Error(Loc, "shift amount must be an immediate");
4186    int64_t Imm = CE->getValue();
4187    if (Imm < 0 ||
4188        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
4189        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
4190      return Error(Loc, "immediate shift value out of range");
4191    Amount = Imm;
4192  }
4193
4194  return false;
4195}
4196
4197/// parseFPImm - Parse a floating point immediate operand.
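/// For example, the "#0.5" in "vmov.f32 s0, #0.5" or the "#-1.0" in
/// "vmov.f64 d0, #-1.0". A bare integer in the range [0,255] is also accepted
/// as an already-encoded value.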
4198ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4199parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4200  SMLoc S = Parser.getTok().getLoc();
4201
4202  if (Parser.getTok().isNot(AsmToken::Hash) &&
4203      Parser.getTok().isNot(AsmToken::Dollar))
4204    return MatchOperand_NoMatch;
4205
4206  // Disambiguate the VMOV forms that can accept an FP immediate.
4207  // vmov.f32 <sreg>, #imm
4208  // vmov.f64 <dreg>, #imm
4209  // vmov.f32 <dreg>, #imm  @ vector f32x2
4210  // vmov.f32 <qreg>, #imm  @ vector f32x4
4211  //
4212  // There are also the NEON VMOV instructions which expect an
4213  // integer constant. Make sure we don't try to parse an FPImm
4214  // for these:
4215  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4216  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4217  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4218                           TyOp->getToken() != ".f64"))
4219    return MatchOperand_NoMatch;
4220
4221  Parser.Lex(); // Eat the '#'.
4222
4223  // Handle negation, as that still comes through as a separate token.
4224  bool isNegative = false;
4225  if (Parser.getTok().is(AsmToken::Minus)) {
4226    isNegative = true;
4227    Parser.Lex();
4228  }
4229  const AsmToken &Tok = Parser.getTok();
4230  if (Tok.is(AsmToken::Real)) {
4231    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4232    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4233    // If we had a '-' in front, toggle the sign bit.
4234    IntVal ^= (uint64_t)isNegative << 63;
4235    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4236    Parser.Lex(); // Eat the token.
4237    if (Val == -1) {
4238      TokError("floating point value out of range");
4239      return MatchOperand_ParseFail;
4240    }
4241    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4242    return MatchOperand_Success;
4243  }
4244  if (Tok.is(AsmToken::Integer)) {
4245    int64_t Val = Tok.getIntVal();
4246    Parser.Lex(); // Eat the token.
4247    if (Val > 255 || Val < 0) {
4248      TokError("encoded floating point value out of range");
4249      return MatchOperand_ParseFail;
4250    }
4251    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4252    return MatchOperand_Success;
4253  }
4254
4255  TokError("invalid floating point immediate");
4256  return MatchOperand_ParseFail;
4257}
4258/// Parse an ARM instruction operand.  For now this parses the operand regardless
4259/// of the mnemonic.
4260bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4261                                StringRef Mnemonic) {
4262  SMLoc S, E;
4263
4264  // Check if the current operand has a custom associated parser, if so, try to
4265  // custom parse the operand, or fallback to the general approach.
4266  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4267  if (ResTy == MatchOperand_Success)
4268    return false;
4269  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4270  // there was a match, but an error occurred, in which case, just return that
4271  // the operand parsing failed.
4272  if (ResTy == MatchOperand_ParseFail)
4273    return true;
4274
4275  switch (getLexer().getKind()) {
4276  default:
4277    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4278    return true;
4279  case AsmToken::Identifier: {
4280    // If this is VMRS, check for the apsr_nzcv operand.
4281    if (!tryParseRegisterWithWriteBack(Operands))
4282      return false;
4283    int Res = tryParseShiftRegister(Operands);
4284    if (Res == 0) // success
4285      return false;
4286    else if (Res == -1) // irrecoverable error
4287      return true;
4288    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4289      S = Parser.getTok().getLoc();
4290      Parser.Lex();
4291      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4292      return false;
4293    }
4294
4295    // Fall through for the Identifier case that is not a register or a
4296    // special name.
4297  }
4298  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4299  case AsmToken::Integer: // things like 1f and 2b as branch targets
4300  case AsmToken::String:  // quoted label names.
4301  case AsmToken::Dot: {   // . as a branch target
4302    // This was not a register so parse other operands that start with an
4303    // identifier (like labels) as expressions and create them as immediates.
4304    const MCExpr *IdVal;
4305    S = Parser.getTok().getLoc();
4306    if (getParser().ParseExpression(IdVal))
4307      return true;
4308    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4309    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4310    return false;
4311  }
4312  case AsmToken::LBrac:
4313    return parseMemory(Operands);
4314  case AsmToken::LCurly:
4315    return parseRegisterList(Operands);
4316  case AsmToken::Dollar:
4317  case AsmToken::Hash: {
4318    // #42 -> immediate.
4319    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4320    S = Parser.getTok().getLoc();
4321    Parser.Lex();
4322    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4323    const MCExpr *ImmVal;
4324    if (getParser().ParseExpression(ImmVal))
4325      return true;
4326    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4327    if (CE) {
4328      int32_t Val = CE->getValue();
4329      if (isNegative && Val == 0)
4330        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4331    }
4332    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4333    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4334    return false;
4335  }
4336  case AsmToken::Colon: {
4337    // ":lower16:" and ":upper16:" expression prefixes
4338    // FIXME: Check it's an expression prefix,
4339    // e.g. (FOO - :lower16:BAR) isn't legal.
4340    ARMMCExpr::VariantKind RefKind;
4341    if (parsePrefix(RefKind))
4342      return true;
4343
4344    const MCExpr *SubExprVal;
4345    if (getParser().ParseExpression(SubExprVal))
4346      return true;
4347
4348    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4349                                                   getContext());
4350    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4351    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4352    return false;
4353  }
4354  }
4355}
4356
4357// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4358//  :lower16: or :upper16:.
4359bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4360  RefKind = ARMMCExpr::VK_ARM_None;
4361
4362  // :lower16: and :upper16: modifiers
4363  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4364  Parser.Lex(); // Eat ':'
4365
4366  if (getLexer().isNot(AsmToken::Identifier)) {
4367    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4368    return true;
4369  }
4370
4371  StringRef IDVal = Parser.getTok().getIdentifier();
4372  if (IDVal == "lower16") {
4373    RefKind = ARMMCExpr::VK_ARM_LO16;
4374  } else if (IDVal == "upper16") {
4375    RefKind = ARMMCExpr::VK_ARM_HI16;
4376  } else {
4377    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4378    return true;
4379  }
4380  Parser.Lex();
4381
4382  if (getLexer().isNot(AsmToken::Colon)) {
4383    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4384    return true;
4385  }
4386  Parser.Lex(); // Eat the last ':'
4387  return false;
4388}
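
// Illustrative usage of parsePrefix (an example, not taken from this file's
// tests): for the operand ":lower16:_foo" in
//   movw r0, :lower16:_foo
// the Colon case in parseOperand calls parsePrefix, which sets RefKind to
// ARMMCExpr::VK_ARM_LO16, and the remaining expression '_foo' is then wrapped
// in an ARMMCExpr. The '#:lower16:' spelling is still a TODO above.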
4389
4390/// \brief Given a mnemonic, split out possible predication code and carry
4391/// setting letters to form a canonical mnemonic and flags.
4392//
4393// FIXME: Would be nice to autogen this.
4394// FIXME: This is a bit of a maze of special cases.
4395StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4396                                      unsigned &PredicationCode,
4397                                      bool &CarrySetting,
4398                                      unsigned &ProcessorIMod,
4399                                      StringRef &ITMask) {
4400  PredicationCode = ARMCC::AL;
4401  CarrySetting = false;
4402  ProcessorIMod = 0;
4403
4404  // Ignore some mnemonics we know aren't predicated forms.
4405  //
4406  // FIXME: Would be nice to autogen this.
4407  if ((Mnemonic == "movs" && isThumb()) ||
4408      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4409      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4410      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4411      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4412      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4413      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4414      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal" ||
4415      Mnemonic == "fmuls")
4416    return Mnemonic;
4417
4418  // First, split out any predication code. Ignore mnemonics we know aren't
4419  // predicated but do have a carry-set and so weren't caught above.
4420  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4421      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4422      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4423      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4424    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4425      .Case("eq", ARMCC::EQ)
4426      .Case("ne", ARMCC::NE)
4427      .Case("hs", ARMCC::HS)
4428      .Case("cs", ARMCC::HS)
4429      .Case("lo", ARMCC::LO)
4430      .Case("cc", ARMCC::LO)
4431      .Case("mi", ARMCC::MI)
4432      .Case("pl", ARMCC::PL)
4433      .Case("vs", ARMCC::VS)
4434      .Case("vc", ARMCC::VC)
4435      .Case("hi", ARMCC::HI)
4436      .Case("ls", ARMCC::LS)
4437      .Case("ge", ARMCC::GE)
4438      .Case("lt", ARMCC::LT)
4439      .Case("gt", ARMCC::GT)
4440      .Case("le", ARMCC::LE)
4441      .Case("al", ARMCC::AL)
4442      .Default(~0U);
4443    if (CC != ~0U) {
4444      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4445      PredicationCode = CC;
4446    }
4447  }
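  // For example (illustrative, not from a test): "subeq" has its trailing
  // "eq" matched above, leaving Mnemonic == "sub" with PredicationCode ==
  // ARMCC::EQ, whereas "teq" never reaches this point because it is filtered
  // out by the early return at the top of the function.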
4448
4449  // Next, determine if we have a carry setting bit. We explicitly ignore all
4450  // the instructions we know end in 's'.
4451  if (Mnemonic.endswith("s") &&
4452      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4453        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4454        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4455        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4456        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4457        Mnemonic == "fmrs" || Mnemonic == "fsqrts" || Mnemonic == "fsubs" ||
4458        Mnemonic == "fsts" || Mnemonic == "fcpys" || Mnemonic == "fdivs" ||
4459        Mnemonic == "fmuls" ||
4460        (Mnemonic == "movs" && isThumb()))) {
4461    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4462    CarrySetting = true;
4463  }
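  // For example (illustrative): "addseq" first has "eq" split off above,
  // leaving "adds"; the trailing 's' is then split off here, giving
  // Mnemonic == "add", CarrySetting == true and PredicationCode == ARMCC::EQ.
  // "vmrs", by contrast, keeps its 's' because it is in the exclusion list.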
4464
4465  // The "cps" instruction can have a interrupt mode operand which is glued into
4466  // the mnemonic. Check if this is the case, split it and parse the imod op
4467  if (Mnemonic.startswith("cps")) {
4468    // Split out any imod code.
4469    unsigned IMod =
4470      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4471      .Case("ie", ARM_PROC::IE)
4472      .Case("id", ARM_PROC::ID)
4473      .Default(~0U);
4474    if (IMod != ~0U) {
4475      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4476      ProcessorIMod = IMod;
4477    }
4478  }
4479
4480  // The "it" instruction has the condition mask on the end of the mnemonic.
4481  if (Mnemonic.startswith("it")) {
4482    ITMask = Mnemonic.slice(2, Mnemonic.size());
4483    Mnemonic = Mnemonic.slice(0, 2);
4484  }
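  // For example (illustrative): "itte" is split into Mnemonic == "it" with
  // ITMask == "te"; the mask characters are converted into a bitmask later,
  // in ParseInstruction.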
4485
4486  return Mnemonic;
4487}
4488
4489/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4490/// inclusion of carry set or predication code operands.
4491//
4492// FIXME: It would be nice to autogen this.
4493void ARMAsmParser::
4494getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4495                      bool &CanAcceptPredicationCode) {
4496  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4497      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4498      Mnemonic == "add" || Mnemonic == "adc" ||
4499      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4500      Mnemonic == "orr" || Mnemonic == "mvn" ||
4501      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4502      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4503      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4504                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4505                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4506    CanAcceptCarrySet = true;
4507  } else
4508    CanAcceptCarrySet = false;
4509
4510  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4511      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4512      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4513      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4514      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4515      (Mnemonic == "clrex" && !isThumb()) ||
4516      (Mnemonic == "nop" && isThumbOne()) ||
4517      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4518        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4519        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4520      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4521       !isThumb()) ||
4522      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4523    CanAcceptPredicationCode = false;
4524  } else
4525    CanAcceptPredicationCode = true;
4526
4527  if (isThumb()) {
4528    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4529        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4530      CanAcceptPredicationCode = false;
4531  }
4532}
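
// Illustrative results of getMnemonicAcceptInfo (examples, not from a test):
// for "add", CanAcceptCarrySet and CanAcceptPredicationCode are both true;
// for "cbz", both are false; for "vadd", only CanAcceptPredicationCode is
// true.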
4533
4534bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4535                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4536  // FIXME: This is all horribly hacky. We really need a better way to deal
4537  // with optional operands like this in the matcher table.
4538
4539  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4540  // another does not. Specifically, the MOVW instruction does not. So we
4541  // special case it here and remove the defaulted (non-setting) cc_out
4542  // operand if that's the instruction we're trying to match.
4543  //
4544  // We do this as post-processing of the explicit operands rather than just
4545  // conditionally adding the cc_out in the first place because we need
4546  // to check the type of the parsed immediate operand.
4547  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4548      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4549      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4550      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4551    return true;
4552
4553  // Register-register 'add' for thumb does not have a cc_out operand
4554  // when there are only two register operands.
4555  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4556      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4557      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4558      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4559    return true;
4560  // Register-register 'add' for thumb does not have a cc_out operand
4561  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4562  // have to check the immediate range here since Thumb2 has a variant
4563  // that can handle a different range and has a cc_out operand.
4564  if (((isThumb() && Mnemonic == "add") ||
4565       (isThumbTwo() && Mnemonic == "sub")) &&
4566      Operands.size() == 6 &&
4567      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4568      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4569      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4570      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4571      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4572       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4573    return true;
4574  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4575  // imm0_4095 variant. That's the least-preferred variant when
4576  // selecting via the generic "add" mnemonic, so to know that we
4577  // should remove the cc_out operand, we have to explicitly check that
4578  // it's not one of the other variants. Ugh.
4579  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4580      Operands.size() == 6 &&
4581      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4582      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4583      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4584    // Nest conditions rather than one big 'if' statement for readability.
4585    //
4586    // If either register is a high reg, it's either one of the SP
4587    // variants (handled above) or a 32-bit encoding, so we just
4588    // check against T3.
4589    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4590         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4591        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4592      return false;
4593    // If both registers are low, we're in an IT block, and the immediate is
4594    // in range, we should use encoding T1 instead, which has a cc_out.
4595    if (inITBlock() &&
4596        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4597        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4598        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4599      return false;
4600
4601    // Otherwise, we use encoding T4, which does not have a cc_out
4602    // operand.
4603    return true;
4604  }
4605
4606  // The thumb2 multiply instruction doesn't have a CCOut register, so
4607  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4608  // use the 16-bit encoding or not.
4609  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4610      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4611      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4612      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4613      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4614      // If the registers aren't low regs, the destination reg isn't the
4615      // same as one of the source regs, or the cc_out operand is zero
4616      // outside of an IT block, we have to use the 32-bit encoding, so
4617      // remove the cc_out operand.
4618      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4619       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4620       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4621       !inITBlock() ||
4622       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4623        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4624        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4625        static_cast<ARMOperand*>(Operands[4])->getReg())))
4626    return true;
4627
4628  // Also check the 'mul' syntax variant that doesn't specify an explicit
4629  // destination register.
4630  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4631      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4632      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4633      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4634      // If the registers aren't low regs or the cc_out operand is zero
4635      // outside of an IT block, we have to use the 32-bit encoding, so
4636      // remove the cc_out operand.
4637      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4638       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4639       !inITBlock()))
4640    return true;
4641
4644  // Register-register 'add/sub' for thumb does not have a cc_out operand
4645  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4646  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4647  // right, this will result in better diagnostics (which operand is off)
4648  // anyway.
4649  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4650      (Operands.size() == 5 || Operands.size() == 6) &&
4651      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4652      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4653      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4654    return true;
4655
4656  return false;
4657}
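
// Illustrative example for the Thumb register-register 'add' check above (not
// from a test): "add r0, r1" parses as [token "add", cc_out 0, cond, r0, r1],
// so shouldOmitCCOutOperand returns true and the defaulted cc_out operand is
// erased in ParseInstruction before matching, since the 16-bit
// register-register encoding has no cc_out operand.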
4658
4659static bool isDataTypeToken(StringRef Tok) {
4660  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4661    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4662    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4663    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4664    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4665    Tok == ".f" || Tok == ".d";
4666}
4667
4668// FIXME: This bit should probably be handled via an explicit match class
4669// in the .td files that matches the suffix instead of having it be
4670// a literal string token the way it is now.
4671static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4672  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4673}
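
// For example (illustrative): "vldm.f64 r0, {d0-d3}" reaches ParseInstruction
// with an extra ".f64" token; because vldm/vstm ignore the datatype suffix,
// the token is dropped there instead of being added as an operand.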
4674
4675static void applyMnemonicAliases(StringRef &Mnemonic, unsigned Features);
4676/// Parse an ARM instruction mnemonic followed by its operands.
4677bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4678                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4679  // Apply mnemonic aliases before doing anything else, as the destination
4680  // mnemonic may include suffixes and we want to handle them normally.
4681  // The generic tblgen'erated code does this later, at the start of
4682  // MatchInstructionImpl(), but that's too late for aliases that include
4683  // any sort of suffix.
4684  unsigned AvailableFeatures = getAvailableFeatures();
4685  applyMnemonicAliases(Name, AvailableFeatures);
4686
4687  // First check for the ARM-specific .req directive.
4688  if (Parser.getTok().is(AsmToken::Identifier) &&
4689      Parser.getTok().getIdentifier() == ".req") {
4690    parseDirectiveReq(Name, NameLoc);
4691    // We always return 'error' for this, as we're done with this
4692    // statement and don't need to match the instruction.
4693    return true;
4694  }
4695
4696  // Create the leading tokens for the mnemonic, split by '.' characters.
4697  size_t Start = 0, Next = Name.find('.');
4698  StringRef Mnemonic = Name.slice(Start, Next);
4699
4700  // Split out the predication code and carry setting flag from the mnemonic.
4701  unsigned PredicationCode;
4702  unsigned ProcessorIMod;
4703  bool CarrySetting;
4704  StringRef ITMask;
4705  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4706                           ProcessorIMod, ITMask);
4707
4708  // In Thumb1, only the branch (B) instruction can be predicated.
4709  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4710    Parser.EatToEndOfStatement();
4711    return Error(NameLoc, "conditional execution not supported in Thumb1");
4712  }
4713
4714  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4715
4716  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4717  // is the mask as it will be for the IT encoding if the conditional
4718  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4719  // where the conditional bit0 is zero, the instruction post-processing
4720  // will adjust the mask accordingly.
4721  if (Mnemonic == "it") {
4722    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4723    if (ITMask.size() > 3) {
4724      Parser.EatToEndOfStatement();
4725      return Error(Loc, "too many conditions on IT instruction");
4726    }
4727    unsigned Mask = 8;
4728    for (unsigned i = ITMask.size(); i != 0; --i) {
4729      char pos = ITMask[i - 1];
4730      if (pos != 't' && pos != 'e') {
4731        Parser.EatToEndOfStatement();
4732        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4733      }
4734      Mask >>= 1;
4735      if (ITMask[i - 1] == 't')
4736        Mask |= 8;
4737    }
4738    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4739  }
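  // Illustrative trace of the mask loop above (assuming the condition's bit0
  // is '1'): for "itte", ITMask is "te"; processing 'e' and then 't' yields
  // Mask == 0b1010, which is what CreateITMask receives before any
  // post-processing for a condition with bit0 == '0'.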
4740
4741  // FIXME: This is all a pretty gross hack. We should automatically handle
4742  // optional operands like this via tblgen.
4743
4744  // Next, add the CCOut and ConditionCode operands, if needed.
4745  //
4746  // For mnemonics which can ever incorporate a carry setting bit or predication
4747  // code, our matching model involves us always generating CCOut and
4748  // ConditionCode operands to match the mnemonic "as written" and then we let
4749  // the matcher deal with finding the right instruction or generating an
4750  // appropriate error.
4751  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4752  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4753
4754  // If we had a carry-set on an instruction that can't do that, issue an
4755  // error.
4756  if (!CanAcceptCarrySet && CarrySetting) {
4757    Parser.EatToEndOfStatement();
4758    return Error(NameLoc, "instruction '" + Mnemonic +
4759                 "' can not set flags, but 's' suffix specified");
4760  }
4761  // If we had a predication code on an instruction that can't do that, issue an
4762  // error.
4763  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4764    Parser.EatToEndOfStatement();
4765    return Error(NameLoc, "instruction '" + Mnemonic +
4766                 "' is not predicable, but condition code specified");
4767  }
4768
4769  // Add the carry setting operand, if necessary.
4770  if (CanAcceptCarrySet) {
4771    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4772    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4773                                               Loc));
4774  }
4775
4776  // Add the predication code operand, if necessary.
4777  if (CanAcceptPredicationCode) {
4778    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4779                                      CarrySetting);
4780    Operands.push_back(ARMOperand::CreateCondCode(
4781                         ARMCC::CondCodes(PredicationCode), Loc));
4782  }
4783
4784  // Add the processor imod operand, if necessary.
4785  if (ProcessorIMod) {
4786    Operands.push_back(ARMOperand::CreateImm(
4787          MCConstantExpr::Create(ProcessorIMod, getContext()),
4788                                 NameLoc, NameLoc));
4789  }
4790
4791  // Add the remaining tokens in the mnemonic.
4792  while (Next != StringRef::npos) {
4793    Start = Next;
4794    Next = Name.find('.', Start + 1);
4795    StringRef ExtraToken = Name.slice(Start, Next);
4796
4797    // Some NEON instructions have an optional datatype suffix that is
4798    // completely ignored. Check for that.
4799    if (isDataTypeToken(ExtraToken) &&
4800        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4801      continue;
4802
4803    if (ExtraToken != ".n") {
4804      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4805      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4806    }
4807  }
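  // For example (illustrative): "add.w r0, r1, r2" keeps ".w" as an extra
  // token operand for the matcher, while a ".n" suffix is simply dropped here
  // and never reaches the matcher.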
4808
4809  // Read the remaining operands.
4810  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4811    // Read the first operand.
4812    if (parseOperand(Operands, Mnemonic)) {
4813      Parser.EatToEndOfStatement();
4814      return true;
4815    }
4816
4817    while (getLexer().is(AsmToken::Comma)) {
4818      Parser.Lex();  // Eat the comma.
4819
4820      // Parse and remember the operand.
4821      if (parseOperand(Operands, Mnemonic)) {
4822        Parser.EatToEndOfStatement();
4823        return true;
4824      }
4825    }
4826  }
4827
4828  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4829    SMLoc Loc = getLexer().getLoc();
4830    Parser.EatToEndOfStatement();
4831    return Error(Loc, "unexpected token in argument list");
4832  }
4833
4834  Parser.Lex(); // Consume the EndOfStatement
4835
4836  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4837  // do and don't have a cc_out optional-def operand. With some spot-checks
4838  // of the operand list, we can figure out which variant we're trying to
4839  // parse and adjust accordingly before actually matching. We shouldn't ever
4840  // try to remove a cc_out operand that was explicitly set on the
4841  // mnemonic, of course (CarrySetting == true). Reason number #317 why the
4842  // table driven matcher doesn't fit well with the ARM instruction set.
4843  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4844    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4845    Operands.erase(Operands.begin() + 1);
4846    delete Op;
4847  }
4848
4849  // ARM mode 'blx' needs special handling, as the register operand version
4850  // is predicable, but the label operand version is not. So, we can't rely
4851  // on the Mnemonic based checking to correctly figure out when to put
4852  // a k_CondCode operand in the list. If we're trying to match the label
4853  // version, remove the k_CondCode operand here.
4854  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4855      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4856    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4857    Operands.erase(Operands.begin() + 1);
4858    delete Op;
4859  }
4860
4861  // The vector-compare-to-zero instructions have a literal token "#0" at
4862  // the end that arrives here as an immediate operand. Convert it to a
4863  // token to play nicely with the matcher.
4864  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4865      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4866      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4867    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4868    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4869    if (CE && CE->getValue() == 0) {
4870      Operands.erase(Operands.begin() + 5);
4871      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4872      delete Op;
4873    }
4874  }
4875  // VCMP{E} does the same thing, but with a different operand count.
4876  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4877      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4878    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4879    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4880    if (CE && CE->getValue() == 0) {
4881      Operands.erase(Operands.begin() + 4);
4882      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4883      delete Op;
4884    }
4885  }
4886  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4887  // end. Convert it to a token here. Take care not to convert those
4888  // that should hit the Thumb2 encoding.
4889  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4890      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4891      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4892      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4893    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4894    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4895    if (CE && CE->getValue() == 0 &&
4896        (isThumbOne() ||
4897         // The cc_out operand matches the IT block.
4898         ((inITBlock() != CarrySetting) &&
4899         // Neither register operand is a high register.
4900         (isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4901          isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()))))){
4902      Operands.erase(Operands.begin() + 5);
4903      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4904      delete Op;
4905    }
4906  }
4907
4908  return false;
4909}
4910
4911// Validate context-sensitive operand constraints.
4912
4913// Return 'true' if the register list contains non-low GPR registers (other
4914// than HiReg, which is explicitly allowed), 'false' otherwise. If Reg is in
4915// the register list, set 'containsReg' to true.
4916static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4917                                 unsigned HiReg, bool &containsReg) {
4918  containsReg = false;
4919  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4920    unsigned OpReg = Inst.getOperand(i).getReg();
4921    if (OpReg == Reg)
4922      containsReg = true;
4923    // Anything other than a low register isn't legal here.
4924    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4925      return true;
4926  }
4927  return false;
4928}
4929
4930// Check if the specified register is in the register list of the inst,
4931// starting at the indicated operand number.
4932static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4933  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4934    unsigned OpReg = Inst.getOperand(i).getReg();
4935    if (OpReg == Reg)
4936      return true;
4937  }
4938  return false;
4939}
4940
4941// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4942// the ARMInsts array) instead. Getting that here requires awkward
4943// API changes, though. Better way?
4944namespace llvm {
4945extern const MCInstrDesc ARMInsts[];
4946}
4947static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4948  return ARMInsts[Opcode];
4949}
4950
4951// FIXME: We would really like to be able to tablegen'erate this.
4952bool ARMAsmParser::
4953validateInstruction(MCInst &Inst,
4954                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4955  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4956  SMLoc Loc = Operands[0]->getStartLoc();
4957  // Check the IT block state first.
4958  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4959  // being allowed in IT blocks, but not being predicable.  It just always
4960  // executes.
4961  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
4962    unsigned bit = 1;
4963    if (ITState.FirstCond)
4964      ITState.FirstCond = false;
4965    else
4966      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
4967    // The instruction must be predicable.
4968    if (!MCID.isPredicable())
4969      return Error(Loc, "instructions in IT block must be predicable");
4970    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
4971    unsigned ITCond = bit ? ITState.Cond :
4972      ARMCC::getOppositeCondition(ITState.Cond);
4973    if (Cond != ITCond) {
4974      // Find the condition code Operand to get its SMLoc information.
4975      SMLoc CondLoc;
4976      for (unsigned i = 1; i < Operands.size(); ++i)
4977        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
4978          CondLoc = Operands[i]->getStartLoc();
4979      return Error(CondLoc, "incorrect condition in IT block; got '" +
4980                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
4981                   "', but expected '" +
4982                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
4983    }
4984  // Check for non-'al' condition codes outside of the IT block.
4985  } else if (isThumbTwo() && MCID.isPredicable() &&
4986             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
4987             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
4988             Inst.getOpcode() != ARM::t2B)
4989    return Error(Loc, "predicated instructions must be in IT block");
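
  // Illustrative examples of the checks above (not from a test): inside
  //   it eq
  // an "addne r0, r0, r1" is rejected with "incorrect condition in IT block",
  // while the same "addne" outside any IT block is rejected in Thumb2 with
  // "predicated instructions must be in IT block".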
4990
4991  switch (Inst.getOpcode()) {
4992  case ARM::LDRD:
4993  case ARM::LDRD_PRE:
4994  case ARM::LDRD_POST:
4995  case ARM::LDREXD: {
4996    // Rt2 must be Rt + 1.
4997    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4998    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4999    if (Rt2 != Rt + 1)
5000      return Error(Operands[3]->getStartLoc(),
5001                   "destination operands must be sequential");
5002    return false;
5003  }
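  // Illustrative diagnostics for the check above (not from a test):
  //   ldrd r0, r1, [r2]   // accepted: Rt2 == Rt + 1
  //   ldrd r0, r2, [r3]   // rejected: "destination operands must be sequential"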
5004  case ARM::STRD: {
5005    // Rt2 must be Rt + 1.
5006    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
5007    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5008    if (Rt2 != Rt + 1)
5009      return Error(Operands[3]->getStartLoc(),
5010                   "source operands must be sequential");
5011    return false;
5012  }
5013  case ARM::STRD_PRE:
5014  case ARM::STRD_POST:
5015  case ARM::STREXD: {
5016    // Rt2 must be Rt + 1.
5017    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
5018    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
5019    if (Rt2 != Rt + 1)
5020      return Error(Operands[3]->getStartLoc(),
5021                   "source operands must be sequential");
5022    return false;
5023  }
5024  case ARM::SBFX:
5025  case ARM::UBFX: {
5026    // width must be in range [1, 32-lsb]
5027    unsigned lsb = Inst.getOperand(2).getImm();
5028    unsigned widthm1 = Inst.getOperand(3).getImm();
5029    if (widthm1 >= 32 - lsb)
5030      return Error(Operands[5]->getStartLoc(),
5031                   "bitfield width must be in range [1,32-lsb]");
5032    return false;
5033  }
5034  case ARM::tLDMIA: {
5035    // If we're parsing Thumb2, the .w variant is available and handles
5036    // most cases that are normally illegal for a Thumb1 LDM
5037    // instruction. We'll make the transformation in processInstruction()
5038    // if necessary.
5039    //
5040    // Thumb LDM instructions are writeback iff the base register is not
5041    // in the register list.
5042    unsigned Rn = Inst.getOperand(0).getReg();
5043    bool hasWritebackToken =
5044      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5045       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5046    bool listContainsBase;
5047    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
5048      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
5049                   "registers must be in range r0-r7");
5050    // If we should have writeback, then there should be a '!' token.
5051    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
5052      return Error(Operands[2]->getStartLoc(),
5053                   "writeback operator '!' expected");
5054    // If we should not have writeback, there must not be a '!'. This is
5055    // true even for the 32-bit wide encodings.
5056    if (listContainsBase && hasWritebackToken)
5057      return Error(Operands[3]->getStartLoc(),
5058                   "writeback operator '!' not allowed when base register "
5059                   "in register list");
5060
5061    break;
5062  }
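  // Illustrative Thumb1 examples for the LDM rules above (not from a test):
  //   ldm r0!, {r1, r2}   // OK: base not in list, writeback '!' present
  //   ldm r0, {r0, r1}    // OK: base in list, no writeback allowed
  //   ldm r0, {r1, r2}    // error: "writeback operator '!' expected"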
5063  case ARM::t2LDMIA_UPD: {
5064    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
5065      return Error(Operands[4]->getStartLoc(),
5066                   "writeback operator '!' not allowed when base register "
5067                   "in register list");
5068    break;
5069  }
5070  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
5071  // so only issue a diagnostic for Thumb1. The instructions will be
5072  // switched to the t2 encodings in processInstruction() if necessary.
5073  case ARM::tPOP: {
5074    bool listContainsBase;
5075    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
5076        !isThumbTwo())
5077      return Error(Operands[2]->getStartLoc(),
5078                   "registers must be in range r0-r7 or pc");
5079    break;
5080  }
5081  case ARM::tPUSH: {
5082    bool listContainsBase;
5083    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
5084        !isThumbTwo())
5085      return Error(Operands[2]->getStartLoc(),
5086                   "registers must be in range r0-r7 or lr");
5087    break;
5088  }
5089  case ARM::tSTMIA_UPD: {
5090    bool listContainsBase;
5091    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
5092      return Error(Operands[4]->getStartLoc(),
5093                   "registers must be in range r0-r7");
5094    break;
5095  }
5096  }
5097
5098  return false;
5099}
5100
5101static unsigned getRealVSTLNOpcode(unsigned Opc) {
5102  switch(Opc) {
5103  default: assert(0 && "unexpected opcode!");
5104  // VST1LN
5105  case ARM::VST1LNdWB_fixed_Asm_8:  case ARM::VST1LNdWB_fixed_Asm_P8:
5106  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5107  case ARM::VST1LNdWB_fixed_Asm_U8:
5108    return ARM::VST1LNd8_UPD;
5109  case ARM::VST1LNdWB_fixed_Asm_16:  case ARM::VST1LNdWB_fixed_Asm_P16:
5110  case ARM::VST1LNdWB_fixed_Asm_I16: case ARM::VST1LNdWB_fixed_Asm_S16:
5111  case ARM::VST1LNdWB_fixed_Asm_U16:
5112    return ARM::VST1LNd16_UPD;
5113  case ARM::VST1LNdWB_fixed_Asm_32:  case ARM::VST1LNdWB_fixed_Asm_F:
5114  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5115  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32:
5116    return ARM::VST1LNd32_UPD;
5117  case ARM::VST1LNdWB_register_Asm_8:  case ARM::VST1LNdWB_register_Asm_P8:
5118  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5119  case ARM::VST1LNdWB_register_Asm_U8:
5120    return ARM::VST1LNd8_UPD;
5121  case ARM::VST1LNdWB_register_Asm_16:  case ARM::VST1LNdWB_register_Asm_P16:
5122  case ARM::VST1LNdWB_register_Asm_I16: case ARM::VST1LNdWB_register_Asm_S16:
5123  case ARM::VST1LNdWB_register_Asm_U16:
5124    return ARM::VST1LNd16_UPD;
5125  case ARM::VST1LNdWB_register_Asm_32:  case ARM::VST1LNdWB_register_Asm_F:
5126  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5127  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32:
5128    return ARM::VST1LNd32_UPD;
5129  case ARM::VST1LNdAsm_8:  case ARM::VST1LNdAsm_P8:
5130  case ARM::VST1LNdAsm_I8: case ARM::VST1LNdAsm_S8:
5131  case ARM::VST1LNdAsm_U8:
5132    return ARM::VST1LNd8;
5133  case ARM::VST1LNdAsm_16:  case ARM::VST1LNdAsm_P16:
5134  case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5135  case ARM::VST1LNdAsm_U16:
5136    return ARM::VST1LNd16;
5137  case ARM::VST1LNdAsm_32:  case ARM::VST1LNdAsm_F:
5138  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32:
5139  case ARM::VST1LNdAsm_S32: case ARM::VST1LNdAsm_U32:
5140    return ARM::VST1LNd32;
5141
5142  // VST2LN
5143  case ARM::VST2LNdWB_fixed_Asm_8:  case ARM::VST2LNdWB_fixed_Asm_P8:
5144  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5145  case ARM::VST2LNdWB_fixed_Asm_U8:
5146    return ARM::VST2LNd8_UPD;
5147  case ARM::VST2LNdWB_fixed_Asm_16:  case ARM::VST2LNdWB_fixed_Asm_P16:
5148  case ARM::VST2LNdWB_fixed_Asm_I16: case ARM::VST2LNdWB_fixed_Asm_S16:
5149  case ARM::VST2LNdWB_fixed_Asm_U16:
5150    return ARM::VST2LNd16_UPD;
5151  case ARM::VST2LNdWB_fixed_Asm_32:  case ARM::VST2LNdWB_fixed_Asm_F:
5152  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5153  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32:
5154    return ARM::VST2LNd32_UPD;
5155  case ARM::VST2LNdWB_register_Asm_8:  case ARM::VST2LNdWB_register_Asm_P8:
5156  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5157  case ARM::VST2LNdWB_register_Asm_U8:
5158    return ARM::VST2LNd8_UPD;
5159  case ARM::VST2LNdWB_register_Asm_16:  case ARM::VST2LNdWB_register_Asm_P16:
5160  case ARM::VST2LNdWB_register_Asm_I16: case ARM::VST2LNdWB_register_Asm_S16:
5161  case ARM::VST2LNdWB_register_Asm_U16:
5162    return ARM::VST2LNd16_UPD;
5163  case ARM::VST2LNdWB_register_Asm_32:  case ARM::VST2LNdWB_register_Asm_F:
5164  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5165  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32:
5166    return ARM::VST2LNd32_UPD;
5167  case ARM::VST2LNdAsm_8:  case ARM::VST2LNdAsm_P8:
5168  case ARM::VST2LNdAsm_I8: case ARM::VST2LNdAsm_S8:
5169  case ARM::VST2LNdAsm_U8:
5170    return ARM::VST2LNd8;
5171  case ARM::VST2LNdAsm_16:  case ARM::VST2LNdAsm_P16:
5172  case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5173  case ARM::VST2LNdAsm_U16:
5174    return ARM::VST2LNd16;
5175  case ARM::VST2LNdAsm_32:  case ARM::VST2LNdAsm_F:
5176  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32:
5177  case ARM::VST2LNdAsm_S32: case ARM::VST2LNdAsm_U32:
5178    return ARM::VST2LNd32;
5179  }
5180}
5181
5182static unsigned getRealVLDLNOpcode(unsigned Opc) {
5183  switch(Opc) {
5184  default: assert(0 && "unexpected opcode!");
5185  // VLD1LN
5186  case ARM::VLD1LNdWB_fixed_Asm_8:  case ARM::VLD1LNdWB_fixed_Asm_P8:
5187  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5188  case ARM::VLD1LNdWB_fixed_Asm_U8:
5189    return ARM::VLD1LNd8_UPD;
5190  case ARM::VLD1LNdWB_fixed_Asm_16:  case ARM::VLD1LNdWB_fixed_Asm_P16:
5191  case ARM::VLD1LNdWB_fixed_Asm_I16: case ARM::VLD1LNdWB_fixed_Asm_S16:
5192  case ARM::VLD1LNdWB_fixed_Asm_U16:
5193    return ARM::VLD1LNd16_UPD;
5194  case ARM::VLD1LNdWB_fixed_Asm_32:  case ARM::VLD1LNdWB_fixed_Asm_F:
5195  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5196  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32:
5197    return ARM::VLD1LNd32_UPD;
5198  case ARM::VLD1LNdWB_register_Asm_8:  case ARM::VLD1LNdWB_register_Asm_P8:
5199  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5200  case ARM::VLD1LNdWB_register_Asm_U8:
5201    return ARM::VLD1LNd8_UPD;
5202  case ARM::VLD1LNdWB_register_Asm_16:  case ARM::VLD1LNdWB_register_Asm_P16:
5203  case ARM::VLD1LNdWB_register_Asm_I16: case ARM::VLD1LNdWB_register_Asm_S16:
5204  case ARM::VLD1LNdWB_register_Asm_U16:
5205    return ARM::VLD1LNd16_UPD;
5206  case ARM::VLD1LNdWB_register_Asm_32:  case ARM::VLD1LNdWB_register_Asm_F:
5207  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5208  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32:
5209    return ARM::VLD1LNd32_UPD;
5210  case ARM::VLD1LNdAsm_8:  case ARM::VLD1LNdAsm_P8:
5211  case ARM::VLD1LNdAsm_I8: case ARM::VLD1LNdAsm_S8:
5212  case ARM::VLD1LNdAsm_U8:
5213    return ARM::VLD1LNd8;
5214  case ARM::VLD1LNdAsm_16:  case ARM::VLD1LNdAsm_P16:
5215  case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5216  case ARM::VLD1LNdAsm_U16:
5217    return ARM::VLD1LNd16;
5218  case ARM::VLD1LNdAsm_32:  case ARM::VLD1LNdAsm_F:
5219  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32:
5220  case ARM::VLD1LNdAsm_S32: case ARM::VLD1LNdAsm_U32:
5221    return ARM::VLD1LNd32;
5222
5223  // VLD2LN
5224  case ARM::VLD2LNdWB_fixed_Asm_8:  case ARM::VLD2LNdWB_fixed_Asm_P8:
5225  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5226  case ARM::VLD2LNdWB_fixed_Asm_U8:
5227    return ARM::VLD2LNd8_UPD;
5228  case ARM::VLD2LNdWB_fixed_Asm_16:  case ARM::VLD2LNdWB_fixed_Asm_P16:
5229  case ARM::VLD2LNdWB_fixed_Asm_I16: case ARM::VLD2LNdWB_fixed_Asm_S16:
5230  case ARM::VLD2LNdWB_fixed_Asm_U16:
5231    return ARM::VLD2LNd16_UPD;
5232  case ARM::VLD2LNdWB_fixed_Asm_32:  case ARM::VLD2LNdWB_fixed_Asm_F:
5233  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5234  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32:
5235    return ARM::VLD2LNd32_UPD;
5236  case ARM::VLD2LNdWB_register_Asm_8:  case ARM::VLD2LNdWB_register_Asm_P8:
5237  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5238  case ARM::VLD2LNdWB_register_Asm_U8:
5239    return ARM::VLD2LNd8_UPD;
5240  case ARM::VLD2LNdWB_register_Asm_16:  case ARM::VLD2LNdWB_register_Asm_P16:
5241  case ARM::VLD2LNdWB_register_Asm_I16: case ARM::VLD2LNdWB_register_Asm_S16:
5242  case ARM::VLD2LNdWB_register_Asm_U16:
5243    return ARM::VLD2LNd16_UPD;
5244  case ARM::VLD2LNdWB_register_Asm_32:  case ARM::VLD2LNdWB_register_Asm_F:
5245  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5246  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32:
5247    return ARM::VLD2LNd32_UPD;
5248  case ARM::VLD2LNdAsm_8:  case ARM::VLD2LNdAsm_P8:
5249  case ARM::VLD2LNdAsm_I8: case ARM::VLD2LNdAsm_S8:
5250  case ARM::VLD2LNdAsm_U8:
5251    return ARM::VLD2LNd8;
5252  case ARM::VLD2LNdAsm_16:  case ARM::VLD2LNdAsm_P16:
5253  case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5254  case ARM::VLD2LNdAsm_U16:
5255    return ARM::VLD2LNd16;
5256  case ARM::VLD2LNdAsm_32:  case ARM::VLD2LNdAsm_F:
5257  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32:
5258  case ARM::VLD2LNdAsm_S32: case ARM::VLD2LNdAsm_U32:
5259    return ARM::VLD2LNd32;
5260  }
5261}
5262
5263bool ARMAsmParser::
5264processInstruction(MCInst &Inst,
5265                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
5266  switch (Inst.getOpcode()) {
5267  // Handle NEON VST complex aliases.
5268  case ARM::VST1LNdWB_register_Asm_8: case ARM::VST1LNdWB_register_Asm_P8:
5269  case ARM::VST1LNdWB_register_Asm_I8: case ARM::VST1LNdWB_register_Asm_S8:
5270  case ARM::VST1LNdWB_register_Asm_U8: case ARM::VST1LNdWB_register_Asm_16:
5271  case ARM::VST1LNdWB_register_Asm_P16: case ARM::VST1LNdWB_register_Asm_I16:
5272  case ARM::VST1LNdWB_register_Asm_S16: case ARM::VST1LNdWB_register_Asm_U16:
5273  case ARM::VST1LNdWB_register_Asm_32: case ARM::VST1LNdWB_register_Asm_F:
5274  case ARM::VST1LNdWB_register_Asm_F32: case ARM::VST1LNdWB_register_Asm_I32:
5275  case ARM::VST1LNdWB_register_Asm_S32: case ARM::VST1LNdWB_register_Asm_U32: {
5276    MCInst TmpInst;
5277    // Shuffle the operands around so the lane index operand is in the
5278    // right place.
5279    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5280    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5281    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5282    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5283    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5284    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5285    TmpInst.addOperand(Inst.getOperand(1)); // lane
5286    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5287    TmpInst.addOperand(Inst.getOperand(6));
5288    Inst = TmpInst;
5289    return true;
5290  }
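  // Illustrative input for the case above (syntax only; the pseudo operand
  // order is taken from the comments): for
  //   vst1.8 {d4[2]}, [r1], r2
  // the parsed pseudo operands (Vd, lane, Rn, alignment, Rm, pred) are
  // reordered into the real VST1LNd8_UPD operand order
  // (Rn_wb, Rn, alignment, Rm, Vd, lane, pred).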
5291
5292  case ARM::VST2LNdWB_register_Asm_8: case ARM::VST2LNdWB_register_Asm_P8:
5293  case ARM::VST2LNdWB_register_Asm_I8: case ARM::VST2LNdWB_register_Asm_S8:
5294  case ARM::VST2LNdWB_register_Asm_U8: case ARM::VST2LNdWB_register_Asm_16:
5295  case ARM::VST2LNdWB_register_Asm_P16: case ARM::VST2LNdWB_register_Asm_I16:
5296  case ARM::VST2LNdWB_register_Asm_S16: case ARM::VST2LNdWB_register_Asm_U16:
5297  case ARM::VST2LNdWB_register_Asm_32: case ARM::VST2LNdWB_register_Asm_F:
5298  case ARM::VST2LNdWB_register_Asm_F32: case ARM::VST2LNdWB_register_Asm_I32:
5299  case ARM::VST2LNdWB_register_Asm_S32: case ARM::VST2LNdWB_register_Asm_U32: {
5300    MCInst TmpInst;
5301    // Shuffle the operands around so the lane index operand is in the
5302    // right place.
5303    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5304    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5305    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5306    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5307    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5308    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5309    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5310    TmpInst.addOperand(Inst.getOperand(1)); // lane
5311    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5312    TmpInst.addOperand(Inst.getOperand(6));
5313    Inst = TmpInst;
5314    return true;
5315  }
5316  case ARM::VST1LNdWB_fixed_Asm_8: case ARM::VST1LNdWB_fixed_Asm_P8:
5317  case ARM::VST1LNdWB_fixed_Asm_I8: case ARM::VST1LNdWB_fixed_Asm_S8:
5318  case ARM::VST1LNdWB_fixed_Asm_U8: case ARM::VST1LNdWB_fixed_Asm_16:
5319  case ARM::VST1LNdWB_fixed_Asm_P16: case ARM::VST1LNdWB_fixed_Asm_I16:
5320  case ARM::VST1LNdWB_fixed_Asm_S16: case ARM::VST1LNdWB_fixed_Asm_U16:
5321  case ARM::VST1LNdWB_fixed_Asm_32: case ARM::VST1LNdWB_fixed_Asm_F:
5322  case ARM::VST1LNdWB_fixed_Asm_F32: case ARM::VST1LNdWB_fixed_Asm_I32:
5323  case ARM::VST1LNdWB_fixed_Asm_S32: case ARM::VST1LNdWB_fixed_Asm_U32: {
5324    MCInst TmpInst;
5325    // Shuffle the operands around so the lane index operand is in the
5326    // right place.
5327    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5328    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5329    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5330    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5331    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5332    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5333    TmpInst.addOperand(Inst.getOperand(1)); // lane
5334    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5335    TmpInst.addOperand(Inst.getOperand(5));
5336    Inst = TmpInst;
5337    return true;
5338  }
5339
5340  case ARM::VST2LNdWB_fixed_Asm_8: case ARM::VST2LNdWB_fixed_Asm_P8:
5341  case ARM::VST2LNdWB_fixed_Asm_I8: case ARM::VST2LNdWB_fixed_Asm_S8:
5342  case ARM::VST2LNdWB_fixed_Asm_U8: case ARM::VST2LNdWB_fixed_Asm_16:
5343  case ARM::VST2LNdWB_fixed_Asm_P16: case ARM::VST2LNdWB_fixed_Asm_I16:
5344  case ARM::VST2LNdWB_fixed_Asm_S16: case ARM::VST2LNdWB_fixed_Asm_U16:
5345  case ARM::VST2LNdWB_fixed_Asm_32: case ARM::VST2LNdWB_fixed_Asm_F:
5346  case ARM::VST2LNdWB_fixed_Asm_F32: case ARM::VST2LNdWB_fixed_Asm_I32:
5347  case ARM::VST2LNdWB_fixed_Asm_S32: case ARM::VST2LNdWB_fixed_Asm_U32: {
5348    MCInst TmpInst;
5349    // Shuffle the operands around so the lane index operand is in the
5350    // right place.
5351    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5352    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5353    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5354    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5355    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5356    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5357    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5358    TmpInst.addOperand(Inst.getOperand(1)); // lane
5359    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5360    TmpInst.addOperand(Inst.getOperand(5));
5361    Inst = TmpInst;
5362    return true;
5363  }
5364  case ARM::VST1LNdAsm_8: case ARM::VST1LNdAsm_P8: case ARM::VST1LNdAsm_I8:
5365  case ARM::VST1LNdAsm_S8: case ARM::VST1LNdAsm_U8: case ARM::VST1LNdAsm_16:
5366  case ARM::VST1LNdAsm_P16: case ARM::VST1LNdAsm_I16: case ARM::VST1LNdAsm_S16:
5367  case ARM::VST1LNdAsm_U16: case ARM::VST1LNdAsm_32: case ARM::VST1LNdAsm_F:
5368  case ARM::VST1LNdAsm_F32: case ARM::VST1LNdAsm_I32: case ARM::VST1LNdAsm_S32:
5369  case ARM::VST1LNdAsm_U32: {
5370    MCInst TmpInst;
5371    // Shuffle the operands around so the lane index operand is in the
5372    // right place.
5373    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5374    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5375    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5376    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5377    TmpInst.addOperand(Inst.getOperand(1)); // lane
5378    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5379    TmpInst.addOperand(Inst.getOperand(5));
5380    Inst = TmpInst;
5381    return true;
5382  }
5383
5384  case ARM::VST2LNdAsm_8: case ARM::VST2LNdAsm_P8: case ARM::VST2LNdAsm_I8:
5385  case ARM::VST2LNdAsm_S8: case ARM::VST2LNdAsm_U8: case ARM::VST2LNdAsm_16:
5386  case ARM::VST2LNdAsm_P16: case ARM::VST2LNdAsm_I16: case ARM::VST2LNdAsm_S16:
5387  case ARM::VST2LNdAsm_U16: case ARM::VST2LNdAsm_32: case ARM::VST2LNdAsm_F:
5388  case ARM::VST2LNdAsm_F32: case ARM::VST2LNdAsm_I32: case ARM::VST2LNdAsm_S32:
5389  case ARM::VST2LNdAsm_U32: {
5390    MCInst TmpInst;
5391    // Shuffle the operands around so the lane index operand is in the
5392    // right place.
5393    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5394    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5395    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5396    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5397    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5398    TmpInst.addOperand(Inst.getOperand(1)); // lane
5399    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5400    TmpInst.addOperand(Inst.getOperand(5));
5401    Inst = TmpInst;
5402    return true;
5403  }
5404  // Handle NEON VLD complex aliases.
5405  case ARM::VLD1LNdWB_register_Asm_8: case ARM::VLD1LNdWB_register_Asm_P8:
5406  case ARM::VLD1LNdWB_register_Asm_I8: case ARM::VLD1LNdWB_register_Asm_S8:
5407  case ARM::VLD1LNdWB_register_Asm_U8: case ARM::VLD1LNdWB_register_Asm_16:
5408  case ARM::VLD1LNdWB_register_Asm_P16: case ARM::VLD1LNdWB_register_Asm_I16:
5409  case ARM::VLD1LNdWB_register_Asm_S16: case ARM::VLD1LNdWB_register_Asm_U16:
5410  case ARM::VLD1LNdWB_register_Asm_32: case ARM::VLD1LNdWB_register_Asm_F:
5411  case ARM::VLD1LNdWB_register_Asm_F32: case ARM::VLD1LNdWB_register_Asm_I32:
5412  case ARM::VLD1LNdWB_register_Asm_S32: case ARM::VLD1LNdWB_register_Asm_U32: {
5413    MCInst TmpInst;
5414    // Shuffle the operands around so the lane index operand is in the
5415    // right place.
5416    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5417    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5418    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5419    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5420    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5421    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5422    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5423    TmpInst.addOperand(Inst.getOperand(1)); // lane
5424    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5425    TmpInst.addOperand(Inst.getOperand(6));
5426    Inst = TmpInst;
5427    return true;
5428  }
5429
5430  case ARM::VLD2LNdWB_register_Asm_8: case ARM::VLD2LNdWB_register_Asm_P8:
5431  case ARM::VLD2LNdWB_register_Asm_I8: case ARM::VLD2LNdWB_register_Asm_S8:
5432  case ARM::VLD2LNdWB_register_Asm_U8: case ARM::VLD2LNdWB_register_Asm_16:
5433  case ARM::VLD2LNdWB_register_Asm_P16: case ARM::VLD2LNdWB_register_Asm_I16:
5434  case ARM::VLD2LNdWB_register_Asm_S16: case ARM::VLD2LNdWB_register_Asm_U16:
5435  case ARM::VLD2LNdWB_register_Asm_32: case ARM::VLD2LNdWB_register_Asm_F:
5436  case ARM::VLD2LNdWB_register_Asm_F32: case ARM::VLD2LNdWB_register_Asm_I32:
5437  case ARM::VLD2LNdWB_register_Asm_S32: case ARM::VLD2LNdWB_register_Asm_U32: {
5438    MCInst TmpInst;
5439    // Shuffle the operands around so the lane index operand is in the
5440    // right place.
5441    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5442    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5443    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5444    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5445    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5446    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5447    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5448    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5449    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5450    TmpInst.addOperand(Inst.getOperand(1)); // lane
5451    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5452    TmpInst.addOperand(Inst.getOperand(6));
5453    Inst = TmpInst;
5454    return true;
5455  }
5456
5457  case ARM::VLD1LNdWB_fixed_Asm_8: case ARM::VLD1LNdWB_fixed_Asm_P8:
5458  case ARM::VLD1LNdWB_fixed_Asm_I8: case ARM::VLD1LNdWB_fixed_Asm_S8:
5459  case ARM::VLD1LNdWB_fixed_Asm_U8: case ARM::VLD1LNdWB_fixed_Asm_16:
5460  case ARM::VLD1LNdWB_fixed_Asm_P16: case ARM::VLD1LNdWB_fixed_Asm_I16:
5461  case ARM::VLD1LNdWB_fixed_Asm_S16: case ARM::VLD1LNdWB_fixed_Asm_U16:
5462  case ARM::VLD1LNdWB_fixed_Asm_32: case ARM::VLD1LNdWB_fixed_Asm_F:
5463  case ARM::VLD1LNdWB_fixed_Asm_F32: case ARM::VLD1LNdWB_fixed_Asm_I32:
5464  case ARM::VLD1LNdWB_fixed_Asm_S32: case ARM::VLD1LNdWB_fixed_Asm_U32: {
5465    MCInst TmpInst;
5466    // Shuffle the operands around so the lane index operand is in the
5467    // right place.
5468    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5469    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5470    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5471    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5472    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5473    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5474    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5475    TmpInst.addOperand(Inst.getOperand(1)); // lane
5476    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5477    TmpInst.addOperand(Inst.getOperand(5));
5478    Inst = TmpInst;
5479    return true;
5480  }
5481
5482  case ARM::VLD2LNdWB_fixed_Asm_8: case ARM::VLD2LNdWB_fixed_Asm_P8:
5483  case ARM::VLD2LNdWB_fixed_Asm_I8: case ARM::VLD2LNdWB_fixed_Asm_S8:
5484  case ARM::VLD2LNdWB_fixed_Asm_U8: case ARM::VLD2LNdWB_fixed_Asm_16:
5485  case ARM::VLD2LNdWB_fixed_Asm_P16: case ARM::VLD2LNdWB_fixed_Asm_I16:
5486  case ARM::VLD2LNdWB_fixed_Asm_S16: case ARM::VLD2LNdWB_fixed_Asm_U16:
5487  case ARM::VLD2LNdWB_fixed_Asm_32: case ARM::VLD2LNdWB_fixed_Asm_F:
5488  case ARM::VLD2LNdWB_fixed_Asm_F32: case ARM::VLD2LNdWB_fixed_Asm_I32:
5489  case ARM::VLD2LNdWB_fixed_Asm_S32: case ARM::VLD2LNdWB_fixed_Asm_U32: {
5490    MCInst TmpInst;
5491    // Shuffle the operands around so the lane index operand is in the
5492    // right place.
5493    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5494    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5495    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5496    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5497    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5498    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5499    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5500    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5501    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5502    TmpInst.addOperand(Inst.getOperand(1)); // lane
5503    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5504    TmpInst.addOperand(Inst.getOperand(5));
5505    Inst = TmpInst;
5506    return true;
5507  }
5508
5509  case ARM::VLD1LNdAsm_8: case ARM::VLD1LNdAsm_P8: case ARM::VLD1LNdAsm_I8:
5510  case ARM::VLD1LNdAsm_S8: case ARM::VLD1LNdAsm_U8: case ARM::VLD1LNdAsm_16:
5511  case ARM::VLD1LNdAsm_P16: case ARM::VLD1LNdAsm_I16: case ARM::VLD1LNdAsm_S16:
5512  case ARM::VLD1LNdAsm_U16: case ARM::VLD1LNdAsm_32: case ARM::VLD1LNdAsm_F:
5513  case ARM::VLD1LNdAsm_F32: case ARM::VLD1LNdAsm_I32: case ARM::VLD1LNdAsm_S32:
5514  case ARM::VLD1LNdAsm_U32: {
5515    MCInst TmpInst;
5516    // Shuffle the operands around so the lane index operand is in the
5517    // right place.
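    // E.g. (illustrative): "vld1.8 {d16[2]}, [r0]" parses as (Vd, lane, Rn,
    // alignment, pred); the real VLD1LNd8 wants (Vd, Rn, alignment, tied Vd,
    // lane, pred).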
5518    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5519    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5520    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5521    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5522    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5523    TmpInst.addOperand(Inst.getOperand(1)); // lane
5524    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5525    TmpInst.addOperand(Inst.getOperand(5));
5526    Inst = TmpInst;
5527    return true;
5528  }
5529
5530  case ARM::VLD2LNdAsm_8: case ARM::VLD2LNdAsm_P8: case ARM::VLD2LNdAsm_I8:
5531  case ARM::VLD2LNdAsm_S8: case ARM::VLD2LNdAsm_U8: case ARM::VLD2LNdAsm_16:
5532  case ARM::VLD2LNdAsm_P16: case ARM::VLD2LNdAsm_I16: case ARM::VLD2LNdAsm_S16:
5533  case ARM::VLD2LNdAsm_U16: case ARM::VLD2LNdAsm_32: case ARM::VLD2LNdAsm_F:
5534  case ARM::VLD2LNdAsm_F32: case ARM::VLD2LNdAsm_I32: case ARM::VLD2LNdAsm_S32:
5535  case ARM::VLD2LNdAsm_U32: {
5536    MCInst TmpInst;
5537    // Shuffle the operands around so the lane index operand is in the
5538    // right place.
5539    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5540    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5541    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5542    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5543    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5544    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5545    TmpInst.addOperand(MCOperand::CreateReg(Inst.getOperand(0).getReg()+1));
5546    TmpInst.addOperand(Inst.getOperand(1)); // lane
5547    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5548    TmpInst.addOperand(Inst.getOperand(5));
5549    Inst = TmpInst;
5550    return true;
5551  }
5552  // Handle the Thumb2 mode MOV complex aliases.
5553  case ARM::t2MOVsi:
5554  case ARM::t2MOVSsi: {
5555    // Which instruction to expand to depends on the CCOut operand and
5556    // whether we're in an IT block if the register operands are low
5557    // registers.
5558    bool isNarrow = false;
5559    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5560        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5561        inITBlock() == (Inst.getOpcode() == ARM::t2MOVsi))
5562      isNarrow = true;
5563    MCInst TmpInst;
5564    unsigned newOpc;
5565    switch(ARM_AM::getSORegShOp(Inst.getOperand(2).getImm())) {
5566    default: llvm_unreachable("unexpected opcode!");
5567    case ARM_AM::asr: newOpc = isNarrow ? ARM::tASRri : ARM::t2ASRri; break;
5568    case ARM_AM::lsr: newOpc = isNarrow ? ARM::tLSRri : ARM::t2LSRri; break;
5569    case ARM_AM::lsl: newOpc = isNarrow ? ARM::tLSLri : ARM::t2LSLri; break;
5570    case ARM_AM::ror: newOpc = ARM::t2RORri; isNarrow = false; break;
5571    }
5572    unsigned Amount = ARM_AM::getSORegOffset(Inst.getOperand(2).getImm());
5573    if (Amount == 32) Amount = 0;
5574    TmpInst.setOpcode(newOpc);
5575    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5576    if (isNarrow)
5577      TmpInst.addOperand(MCOperand::CreateReg(
5578          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5579    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5580    TmpInst.addOperand(MCOperand::CreateImm(Amount));
5581    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5582    TmpInst.addOperand(Inst.getOperand(4));
5583    if (!isNarrow)
5584      TmpInst.addOperand(MCOperand::CreateReg(
5585          Inst.getOpcode() == ARM::t2MOVSsi ? ARM::CPSR : 0));
5586    Inst = TmpInst;
5587    return true;
5588  }
5589  // Handle the ARM mode MOV complex aliases.
5590  case ARM::ASRr:
5591  case ARM::LSRr:
5592  case ARM::LSLr:
5593  case ARM::RORr: {
5594    ARM_AM::ShiftOpc ShiftTy;
5595    switch(Inst.getOpcode()) {
5596    default: llvm_unreachable("unexpected opcode!");
5597    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5598    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5599    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5600    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5601    }
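    // E.g. (illustrative): "asr r0, r1, r2" is the register-shifted move
    // "mov r0, r1, asr r2", i.e. MOVsr.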
5602    // Register-shifted form: encode the shift type with a zero immediate.
5603    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5604    MCInst TmpInst;
5605    TmpInst.setOpcode(ARM::MOVsr);
5606    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5607    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5608    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5609    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5610    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5611    TmpInst.addOperand(Inst.getOperand(4));
5612    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5613    Inst = TmpInst;
5614    return true;
5615  }
5616  case ARM::ASRi:
5617  case ARM::LSRi:
5618  case ARM::LSLi:
5619  case ARM::RORi: {
5620    ARM_AM::ShiftOpc ShiftTy;
5621    switch(Inst.getOpcode()) {
5622    default: llvm_unreachable("unexpected opcode!");
5623    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5624    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5625    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5626    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5627    }
5628    // A shift by zero is a plain MOVr, not a MOVsi.
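    // E.g. (illustrative): "lsr r0, r1, #3" becomes "mov r0, r1, lsr #3"
    // (MOVsi), while "lsl r0, r1, #0" becomes a plain "mov r0, r1" (MOVr).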
5629    unsigned Amt = Inst.getOperand(2).getImm();
5630    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5631    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5632    MCInst TmpInst;
5633    TmpInst.setOpcode(Opc);
5634    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5635    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5636    if (Opc == ARM::MOVsi)
5637      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5638    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5639    TmpInst.addOperand(Inst.getOperand(4));
5640    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5641    Inst = TmpInst;
5642    return true;
5643  }
5644  case ARM::RRXi: {
5645    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5646    MCInst TmpInst;
5647    TmpInst.setOpcode(ARM::MOVsi);
5648    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5649    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5650    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5651    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5652    TmpInst.addOperand(Inst.getOperand(3));
5653    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5654    Inst = TmpInst;
5655    return true;
5656  }
5657  case ARM::t2LDMIA_UPD: {
5658    // If this is a load of a single register, then we should use
5659    // a post-indexed LDR instruction instead, per the ARM ARM.
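    // E.g. (illustrative): "ldmia r2!, {r3}" is emitted as the equivalent
    // post-indexed load "ldr r3, [r2], #4" (t2LDR_POST).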
5660    if (Inst.getNumOperands() != 5)
5661      return false;
5662    MCInst TmpInst;
5663    TmpInst.setOpcode(ARM::t2LDR_POST);
5664    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5665    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5666    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5667    TmpInst.addOperand(MCOperand::CreateImm(4));
5668    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5669    TmpInst.addOperand(Inst.getOperand(3));
5670    Inst = TmpInst;
5671    return true;
5672  }
5673  case ARM::t2STMDB_UPD: {
5674    // If this is a store of a single register, then we should use
5675    // a pre-indexed STR instruction instead, per the ARM ARM.
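    // E.g. (illustrative): "stmdb r2!, {r3}" is emitted as the equivalent
    // pre-indexed store "str r3, [r2, #-4]!" (t2STR_PRE).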
5676    if (Inst.getNumOperands() != 5)
5677      return false;
5678    MCInst TmpInst;
5679    TmpInst.setOpcode(ARM::t2STR_PRE);
5680    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5681    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5682    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5683    TmpInst.addOperand(MCOperand::CreateImm(-4));
5684    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5685    TmpInst.addOperand(Inst.getOperand(3));
5686    Inst = TmpInst;
5687    return true;
5688  }
5689  case ARM::LDMIA_UPD:
5690    // If this is a load of a single register via a 'pop', then we should use
5691    // a post-indexed LDR instruction instead, per the ARM ARM.
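    // E.g. (illustrative): "pop {r3}" in ARM mode is emitted as
    // "ldr r3, [sp], #4" (LDR_POST_IMM).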
5692    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5693        Inst.getNumOperands() == 5) {
5694      MCInst TmpInst;
5695      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5696      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5697      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5698      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5699      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5700      TmpInst.addOperand(MCOperand::CreateImm(4));
5701      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5702      TmpInst.addOperand(Inst.getOperand(3));
5703      Inst = TmpInst;
5704      return true;
5705    }
5706    break;
5707  case ARM::STMDB_UPD:
5708    // If this is a store of a single register via a 'push', then we should use
5709    // a pre-indexed STR instruction instead, per the ARM ARM.
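    // E.g. (illustrative): "push {r3}" in ARM mode is emitted as
    // "str r3, [sp, #-4]!" (STR_PRE_IMM).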
5710    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5711        Inst.getNumOperands() == 5) {
5712      MCInst TmpInst;
5713      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5714      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5715      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5716      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5717      TmpInst.addOperand(MCOperand::CreateImm(-4));
5718      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5719      TmpInst.addOperand(Inst.getOperand(3));
5720      Inst = TmpInst;
5721    }
5722    break;
5723  case ARM::t2ADDri12:
5724    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5725    // mnemonic was used (not "addw"), encoding T3 is preferred.
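    // E.g. (illustrative): an "add r0, r1, #16" that matched this encoding is
    // switched to t2ADDri, since #16 is a valid modified immediate;
    // "addw r0, r1, #16" is left alone.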
5726    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5727        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5728      break;
5729    Inst.setOpcode(ARM::t2ADDri);
5730    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5731    break;
5732  case ARM::t2SUBri12:
5733    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5734    // mnemonic was used (not "subw"), encoding T3 is preferred.
5735    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5736        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5737      break;
5738    Inst.setOpcode(ARM::t2SUBri);
5739    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5740    break;
5741  case ARM::tADDi8:
5742    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5743    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5744    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5745    // to encoding T1 if <Rd> is omitted."
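    // E.g. (illustrative): "adds r1, r1, #2" (Rd written explicitly) prefers
    // tADDi3; "adds r1, #2" keeps tADDi8.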
5746    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5747      Inst.setOpcode(ARM::tADDi3);
5748      return true;
5749    }
5750    break;
5751  case ARM::tSUBi8:
5752    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5753    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5754    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5755    // to encoding T1 if <Rd> is omitted."
5756    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5757      Inst.setOpcode(ARM::tSUBi3);
5758      return true;
5759    }
5760    break;
5761  case ARM::t2ADDrr: {
5762    // If the destination and first source operand are the same, and
5763    // there's no setting of the flags, use encoding T2 instead of T3.
5764    // Note that this is only for ADD, not SUB. This mirrors the system
5765    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
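    // E.g. (illustrative): "add r0, r0, r1" with no flag setting and no ".w"
    // suffix uses the 16-bit tADDhirr encoding.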
5766    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5767        Inst.getOperand(5).getReg() != 0 ||
5768        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5769         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5770      break;
5771    MCInst TmpInst;
5772    TmpInst.setOpcode(ARM::tADDhirr);
5773    TmpInst.addOperand(Inst.getOperand(0));
5774    TmpInst.addOperand(Inst.getOperand(0));
5775    TmpInst.addOperand(Inst.getOperand(2));
5776    TmpInst.addOperand(Inst.getOperand(3));
5777    TmpInst.addOperand(Inst.getOperand(4));
5778    Inst = TmpInst;
5779    return true;
5780  }
5781  case ARM::tB:
5782    // A Thumb conditional branch outside of an IT block is a tBcc.
5783    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5784      Inst.setOpcode(ARM::tBcc);
5785      return true;
5786    }
5787    break;
5788  case ARM::t2B:
5789    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5790    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5791      Inst.setOpcode(ARM::t2Bcc);
5792      return true;
5793    }
5794    break;
5795  case ARM::t2Bcc:
5796    // If the conditional is AL or we're in an IT block, we really want t2B.
5797    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5798      Inst.setOpcode(ARM::t2B);
5799      return true;
5800    }
5801    break;
5802  case ARM::tBcc:
5803    // If the conditional is AL, we really want tB.
5804    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5805      Inst.setOpcode(ARM::tB);
5806      return true;
5807    }
5808    break;
5809  case ARM::tLDMIA: {
5810    // If the register list contains any high registers, or if the writeback
5811    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
5812    // instead if we're in Thumb2. Otherwise, this should have generated
5813    // an error in validateInstruction().
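    // E.g. (illustrative): a list containing a high register, such as
    // "ldmia r2!, {r8}", forces the 32-bit t2LDMIA_UPD form.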
5814    unsigned Rn = Inst.getOperand(0).getReg();
5815    bool hasWritebackToken =
5816      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5817       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5818    bool listContainsBase;
5819    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
5820        (!listContainsBase && !hasWritebackToken) ||
5821        (listContainsBase && hasWritebackToken)) {
5822      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5823      assert (isThumbTwo());
5824      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
5825      // If we're switching to the updating version, we need to insert
5826      // the writeback tied operand.
5827      if (hasWritebackToken)
5828        Inst.insert(Inst.begin(),
5829                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
5830      return true;
5831    }
5832    break;
5833  }
5834  case ARM::tSTMIA_UPD: {
5835    // If the register list contains any high registers, we need to use
5836    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5837    // should have generated an error in validateInstruction().
5838    unsigned Rn = Inst.getOperand(0).getReg();
5839    bool listContainsBase;
5840    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
5841      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5842      assert (isThumbTwo());
5843      Inst.setOpcode(ARM::t2STMIA_UPD);
5844      return true;
5845    }
5846    break;
5847  }
5848  case ARM::tPOP: {
5849    bool listContainsBase;
5850    // If the register list contains any high registers, we need to use
5851    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5852    // should have generated an error in validateInstruction().
5853    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
5854      return false;
5855    assert (isThumbTwo());
5856    Inst.setOpcode(ARM::t2LDMIA_UPD);
5857    // Add the base register and writeback operands.
5858    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5859    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5860    return true;
5861  }
5862  case ARM::tPUSH: {
5863    bool listContainsBase;
5864    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
5865      return false;
5866    assert (isThumbTwo());
5867    Inst.setOpcode(ARM::t2STMDB_UPD);
5868    // Add the base register and writeback operands.
5869    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5870    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5871    return true;
5872  }
5873  case ARM::t2MOVi: {
5874    // If we can use the 16-bit encoding and the user didn't explicitly
5875    // request the 32-bit variant, transform it here.
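    // E.g. (illustrative): "movs r0, #42" outside an IT block (or
    // "mov r0, #42" inside one) becomes the 16-bit tMOVi8; "mov.w r0, #42"
    // stays wide.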
5876    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5877        Inst.getOperand(1).getImm() <= 255 &&
5878        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
5879         Inst.getOperand(4).getReg() == ARM::CPSR) ||
5880        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
5881        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5882         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5883      // The operands aren't in the same order for tMOVi8...
5884      MCInst TmpInst;
5885      TmpInst.setOpcode(ARM::tMOVi8);
5886      TmpInst.addOperand(Inst.getOperand(0));
5887      TmpInst.addOperand(Inst.getOperand(4));
5888      TmpInst.addOperand(Inst.getOperand(1));
5889      TmpInst.addOperand(Inst.getOperand(2));
5890      TmpInst.addOperand(Inst.getOperand(3));
5891      Inst = TmpInst;
5892      return true;
5893    }
5894    break;
5895  }
5896  case ARM::t2MOVr: {
5897    // If we can use the 16-bit encoding and the user didn't explicitly
5898    // request the 32-bit variant, transform it here.
5899    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5900        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5901        Inst.getOperand(2).getImm() == ARMCC::AL &&
5902        Inst.getOperand(4).getReg() == ARM::CPSR &&
5903        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5904         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5905      // The operands aren't the same for tMOV[S]r... (no cc_out)
5906      MCInst TmpInst;
5907      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
5908      TmpInst.addOperand(Inst.getOperand(0));
5909      TmpInst.addOperand(Inst.getOperand(1));
5910      TmpInst.addOperand(Inst.getOperand(2));
5911      TmpInst.addOperand(Inst.getOperand(3));
5912      Inst = TmpInst;
5913      return true;
5914    }
5915    break;
5916  }
5917  case ARM::t2SXTH:
5918  case ARM::t2SXTB:
5919  case ARM::t2UXTH:
5920  case ARM::t2UXTB: {
5921    // If we can use the 16-bit encoding and the user didn't explicitly
5922    // request the 32-bit variant, transform it here.
5923    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5924        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5925        Inst.getOperand(2).getImm() == 0 &&
5926        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5927         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5928      unsigned NewOpc;
5929      switch (Inst.getOpcode()) {
5930      default: llvm_unreachable("Illegal opcode!");
5931      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
5932      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
5933      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
5934      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
5935      }
5936      // The operands aren't the same for thumb1 (no rotate operand).
5937      MCInst TmpInst;
5938      TmpInst.setOpcode(NewOpc);
5939      TmpInst.addOperand(Inst.getOperand(0));
5940      TmpInst.addOperand(Inst.getOperand(1));
5941      TmpInst.addOperand(Inst.getOperand(3));
5942      TmpInst.addOperand(Inst.getOperand(4));
5943      Inst = TmpInst;
5944      return true;
5945    }
5946    break;
5947  }
5948  case ARM::MOVsi: {
5949    ARM_AM::ShiftOpc SOpc = ARM_AM::getSORegShOp(Inst.getOperand(2).getImm());
5950    if (SOpc == ARM_AM::rrx) return false;
5951    if (ARM_AM::getSORegOffset(Inst.getOperand(2).getImm()) == 0) {
5952      // Shifting by zero is accepted as a vanilla 'MOVr'
5953      MCInst TmpInst;
5954      TmpInst.setOpcode(ARM::MOVr);
5955      TmpInst.addOperand(Inst.getOperand(0));
5956      TmpInst.addOperand(Inst.getOperand(1));
5957      TmpInst.addOperand(Inst.getOperand(3));
5958      TmpInst.addOperand(Inst.getOperand(4));
5959      TmpInst.addOperand(Inst.getOperand(5));
5960      Inst = TmpInst;
5961      return true;
5962    }
5963    return false;
5964  }
5965  case ARM::t2IT: {
5966    // In the encoding, the mask bits for all but the first condition use
5967    // the low bit of the condition code value to mean 't'. The parsed
5968    // mask always uses 1 to mean 't', so XOR-toggle the bits when the low
5969    // bit of the condition code is zero. The encoding also expects the
5970    // low bit of the condition to be encoded as bit 4 of the mask
5971    // operand, so mask that in if needed.
5972    MCOperand &MO = Inst.getOperand(1);
5973    unsigned Mask = MO.getImm();
5974    unsigned OrigMask = Mask;
5975    unsigned TZ = CountTrailingZeros_32(Mask);
5976    if ((Inst.getOperand(0).getImm() & 1) == 0) {
5977      assert(Mask && TZ <= 3 && "illegal IT mask value!");
5978      for (unsigned i = 3; i != TZ; --i)
5979        Mask ^= 1 << i;
5980    } else
5981      Mask |= 0x10;
5982    MO.setImm(Mask);
5983
5984    // Set up the IT block state according to the IT instruction we just
5985    // matched.
5986    assert(!inITBlock() && "nested IT blocks?!");
5987    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
5988    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
5989    ITState.CurPosition = 0;
5990    ITState.FirstCond = true;
5991    break;
5992  }
5993  }
5994  return false;
5995}
5996
5997unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
5998  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
5999  // suffix depending on whether they're in an IT block or not.
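  // E.g. (illustrative, Thumb2): "adds r1, r2, #1" is only valid outside an
  // IT block, while "add r1, r2, #1" is only valid inside one.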
6000  unsigned Opc = Inst.getOpcode();
6001  const MCInstrDesc &MCID = getInstDesc(Opc);
6002  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
6003    assert(MCID.hasOptionalDef() &&
6004           "optionally flag setting instruction missing optional def operand");
6005    assert(MCID.NumOperands == Inst.getNumOperands() &&
6006           "operand count mismatch!");
6007    // Find the optional-def operand (cc_out).
6008    unsigned OpNo;
6009    for (OpNo = 0;
6010         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
6011         ++OpNo)
6012      ;
6013    // If we're parsing Thumb1, reject it completely.
6014    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
6015      return Match_MnemonicFail;
6016    // If we're parsing Thumb2, which form is legal depends on whether we're
6017    // in an IT block.
6018    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
6019        !inITBlock())
6020      return Match_RequiresITBlock;
6021    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
6022        inITBlock())
6023      return Match_RequiresNotITBlock;
6024  }
6025  // Some high-register supporting Thumb1 encodings only allow both registers
6026  // to be from r0-r7 when in Thumb2.
6027  else if (Opc == ARM::tADDhirr && isThumbOne() &&
6028           isARMLowRegister(Inst.getOperand(1).getReg()) &&
6029           isARMLowRegister(Inst.getOperand(2).getReg()))
6030    return Match_RequiresThumb2;
6031  // Others only require ARMv6 or later.
6032  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
6033           isARMLowRegister(Inst.getOperand(0).getReg()) &&
6034           isARMLowRegister(Inst.getOperand(1).getReg()))
6035    return Match_RequiresV6;
6036  return Match_Success;
6037}
6038
6039bool ARMAsmParser::
6040MatchAndEmitInstruction(SMLoc IDLoc,
6041                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
6042                        MCStreamer &Out) {
6043  MCInst Inst;
6044  unsigned ErrorInfo;
6045  unsigned MatchResult;
6046  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
6047  switch (MatchResult) {
6048  default: break;
6049  case Match_Success:
6050    // Context sensitive operand constraints aren't handled by the matcher,
6051    // so check them here.
6052    if (validateInstruction(Inst, Operands)) {
6053      // Still progress the IT block, otherwise one wrong condition causes
6054      // nasty cascading errors.
6055      forwardITPosition();
6056      return true;
6057    }
6058
6059    // Some instructions need post-processing to, for example, tweak which
6060    // encoding is selected. Loop on it while changes happen so the
6061    // individual transformations can chain off each other. E.g.,
6062    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
6063    while (processInstruction(Inst, Operands))
6064      ;
6065
6066    // Only move forward at the very end so that everything in validate
6067    // and process gets a consistent answer about whether we're in an IT
6068    // block.
6069    forwardITPosition();
6070
6071    Out.EmitInstruction(Inst);
6072    return false;
6073  case Match_MissingFeature:
6074    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
6075    return true;
6076  case Match_InvalidOperand: {
6077    SMLoc ErrorLoc = IDLoc;
6078    if (ErrorInfo != ~0U) {
6079      if (ErrorInfo >= Operands.size())
6080        return Error(IDLoc, "too few operands for instruction");
6081
6082      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
6083      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
6084    }
6085
6086    return Error(ErrorLoc, "invalid operand for instruction");
6087  }
6088  case Match_MnemonicFail:
6089    return Error(IDLoc, "invalid instruction");
6090  case Match_ConversionFail:
6091    // The converter function will have already emitted a diagnostic.
6092    return true;
6093  case Match_RequiresNotITBlock:
6094    return Error(IDLoc, "flag setting instruction only valid outside IT block");
6095  case Match_RequiresITBlock:
6096    return Error(IDLoc, "instruction only valid inside IT block");
6097  case Match_RequiresV6:
6098    return Error(IDLoc, "instruction variant requires ARMv6 or later");
6099  case Match_RequiresThumb2:
6100    return Error(IDLoc, "instruction variant requires Thumb2");
6101  }
6102
6103  llvm_unreachable("Implement any new match types added!");
6104  return true;
6105}
6106
6107/// parseDirective parses the ARM-specific directives
6108bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
6109  StringRef IDVal = DirectiveID.getIdentifier();
6110  if (IDVal == ".word")
6111    return parseDirectiveWord(4, DirectiveID.getLoc());
6112  else if (IDVal == ".thumb")
6113    return parseDirectiveThumb(DirectiveID.getLoc());
6114  else if (IDVal == ".arm")
6115    return parseDirectiveARM(DirectiveID.getLoc());
6116  else if (IDVal == ".thumb_func")
6117    return parseDirectiveThumbFunc(DirectiveID.getLoc());
6118  else if (IDVal == ".code")
6119    return parseDirectiveCode(DirectiveID.getLoc());
6120  else if (IDVal == ".syntax")
6121    return parseDirectiveSyntax(DirectiveID.getLoc());
6122  else if (IDVal == ".unreq")
6123    return parseDirectiveUnreq(DirectiveID.getLoc());
6124  return true;
6125}
6126
6127/// parseDirectiveWord
6128///  ::= .word [ expression (, expression)* ]
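///  e.g. (illustrative) ".word 0x11223344, sym+4"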
6129bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
6130  if (getLexer().isNot(AsmToken::EndOfStatement)) {
6131    for (;;) {
6132      const MCExpr *Value;
6133      if (getParser().ParseExpression(Value))
6134        return true;
6135
6136      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
6137
6138      if (getLexer().is(AsmToken::EndOfStatement))
6139        break;
6140
6141      // FIXME: Improve diagnostic.
6142      if (getLexer().isNot(AsmToken::Comma))
6143        return Error(L, "unexpected token in directive");
6144      Parser.Lex();
6145    }
6146  }
6147
6148  Parser.Lex();
6149  return false;
6150}
6151
6152/// parseDirectiveThumb
6153///  ::= .thumb
6154bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
6155  if (getLexer().isNot(AsmToken::EndOfStatement))
6156    return Error(L, "unexpected token in directive");
6157  Parser.Lex();
6158
6159  if (!isThumb())
6160    SwitchMode();
6161  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6162  return false;
6163}
6164
6165/// parseDirectiveARM
6166///  ::= .arm
6167bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
6168  if (getLexer().isNot(AsmToken::EndOfStatement))
6169    return Error(L, "unexpected token in directive");
6170  Parser.Lex();
6171
6172  if (isThumb())
6173    SwitchMode();
6174  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6175  return false;
6176}
6177
6178/// parseDirectiveThumbFunc
6179///  ::= .thumb_func symbol_name
6180bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
6181  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
6182  bool isMachO = MAI.hasSubsectionsViaSymbols();
6183  StringRef Name;
6184
6185  // Darwin asm has the function name after the .thumb_func directive;
6186  // ELF doesn't.
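  // E.g. (illustrative): ".thumb_func _foo" on Darwin, versus a bare
  // ".thumb_func" on the line before the function's label for ELF.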
6187  if (isMachO) {
6188    const AsmToken &Tok = Parser.getTok();
6189    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
6190      return Error(L, "unexpected token in .thumb_func directive");
6191    Name = Tok.getIdentifier();
6192    Parser.Lex(); // Consume the identifier token.
6193  }
6194
6195  if (getLexer().isNot(AsmToken::EndOfStatement))
6196    return Error(L, "unexpected token in directive");
6197  Parser.Lex();
6198
6199  // FIXME: assuming the function name will be on the line following .thumb_func
6200  if (!isMachO) {
6201    Name = Parser.getTok().getIdentifier();
6202  }
6203
6204  // Mark symbol as a thumb symbol.
6205  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
6206  getParser().getStreamer().EmitThumbFunc(Func);
6207  return false;
6208}
6209
6210/// parseDirectiveSyntax
6211///  ::= .syntax unified | divided
6212bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
6213  const AsmToken &Tok = Parser.getTok();
6214  if (Tok.isNot(AsmToken::Identifier))
6215    return Error(L, "unexpected token in .syntax directive");
6216  StringRef Mode = Tok.getString();
6217  if (Mode == "unified" || Mode == "UNIFIED")
6218    Parser.Lex();
6219  else if (Mode == "divided" || Mode == "DIVIDED")
6220    return Error(L, "'.syntax divided' arm asssembly not supported");
6221  else
6222    return Error(L, "unrecognized syntax mode in .syntax directive");
6223
6224  if (getLexer().isNot(AsmToken::EndOfStatement))
6225    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6226  Parser.Lex();
6227
6228  // TODO tell the MC streamer the mode
6229  // getParser().getStreamer().Emit???();
6230  return false;
6231}
6232
6233/// parseDirectiveCode
6234///  ::= .code 16 | 32
6235bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
6236  const AsmToken &Tok = Parser.getTok();
6237  if (Tok.isNot(AsmToken::Integer))
6238    return Error(L, "unexpected token in .code directive");
6239  int64_t Val = Parser.getTok().getIntVal();
6240  if (Val == 16)
6241    Parser.Lex();
6242  else if (Val == 32)
6243    Parser.Lex();
6244  else
6245    return Error(L, "invalid operand to .code directive");
6246
6247  if (getLexer().isNot(AsmToken::EndOfStatement))
6248    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
6249  Parser.Lex();
6250
6251  if (Val == 16) {
6252    if (!isThumb())
6253      SwitchMode();
6254    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
6255  } else {
6256    if (isThumb())
6257      SwitchMode();
6258    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
6259  }
6260
6261  return false;
6262}
6263
6264/// parseDirectiveReq
6265///  ::= name .req registername
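///  e.g. (illustrative) "fp .req r11"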
6266bool ARMAsmParser::parseDirectiveReq(StringRef Name, SMLoc L) {
6267  Parser.Lex(); // Eat the '.req' token.
6268  unsigned Reg;
6269  SMLoc SRegLoc, ERegLoc;
6270  if (ParseRegister(Reg, SRegLoc, ERegLoc)) {
6271    Parser.EatToEndOfStatement();
6272    return Error(SRegLoc, "register name expected");
6273  }
6274
6275  // Shouldn't be anything else.
6276  if (Parser.getTok().isNot(AsmToken::EndOfStatement)) {
6277    Parser.EatToEndOfStatement();
6278    return Error(Parser.getTok().getLoc(),
6279                 "unexpected input in .req directive.");
6280  }
6281
6282  Parser.Lex(); // Consume the EndOfStatement
6283
6284  if (RegisterReqs.GetOrCreateValue(Name, Reg).getValue() != Reg)
6285    return Error(SRegLoc, "redefinition of '" + Name +
6286                          "' does not match original.");
6287
6288  return false;
6289}
6290
6291/// parseDirectiveUnreq
6292///  ::= .unreq registername
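///  e.g. (illustrative) ".unreq fp"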
6293bool ARMAsmParser::parseDirectiveUnreq(SMLoc L) {
6294  if (Parser.getTok().isNot(AsmToken::Identifier)) {
6295    Parser.EatToEndOfStatement();
6296    return Error(L, "unexpected input in .unreq directive.");
6297  }
6298  RegisterReqs.erase(Parser.getTok().getIdentifier());
6299  Parser.Lex(); // Eat the identifier.
6300  return false;
6301}
6302
6303extern "C" void LLVMInitializeARMAsmLexer();
6304
6305/// Force static initialization.
6306extern "C" void LLVMInitializeARMAsmParser() {
6307  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
6308  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
6309  LLVMInitializeARMAsmLexer();
6310}
6311
6312#define GET_REGISTER_MATCHER
6313#define GET_MATCHER_IMPLEMENTATION
6314#include "ARMGenAsmMatcher.inc"
6315