ARMAsmParser.cpp revision 40e285554773c51f6dd6eb8d076256e557fab9c3
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  struct {
49    ARMCC::CondCodes Cond;    // Condition for IT block.
50    unsigned Mask:4;          // Condition mask for instructions.
51                              // Starting at first 1 (from lsb).
52                              //   '1'  condition as indicated in IT.
53                              //   '0'  inverse of condition (else).
54                              // Count of instructions in IT block is
55                              // 4 - trailingzeroes(mask)
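                              // e.g. Mask = 0b0100 has two trailing zeros,
                              // so the block covers 4 - 2 = 2 instructions;
                              // Mask = 0b1000 is a single-instruction block.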
56
57    bool FirstCond;           // Explicit flag for when we're parsing the
58                              // first instruction in the IT block. It's
59                              // implied in the mask, so needs special
60                              // handling.
61
62    unsigned CurPosition;     // Current position in parsing of IT
63                              // block. In range [0,3]. Initialized
64                              // according to count of instructions in block.
65                              // ~0U if no active IT block.
66  } ITState;
67  bool inITBlock() { return ITState.CurPosition != ~0U;}
68  void forwardITPosition() {
69    if (!inITBlock()) return;
70    // Move to the next instruction in the IT block, if there is one. If not,
71    // mark the block as done.
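    // e.g. with Mask = 0b0010 (one trailing zero) the block is done once
    // CurPosition reaches 5 - 1 == 4.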
72    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
73    if (++ITState.CurPosition == 5 - TZ)
74      ITState.CurPosition = ~0U; // Done with the IT block after this.
75  }
76
77
78  MCAsmParser &getParser() const { return Parser; }
79  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
80
81  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
82  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
83
84  int tryParseRegister();
85  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
86  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
87  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
88  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
89  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
90  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
91  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
92                              unsigned &ShiftAmount);
93  bool parseDirectiveWord(unsigned Size, SMLoc L);
94  bool parseDirectiveThumb(SMLoc L);
95  bool parseDirectiveARM(SMLoc L);
96  bool parseDirectiveThumbFunc(SMLoc L);
97  bool parseDirectiveCode(SMLoc L);
98  bool parseDirectiveSyntax(SMLoc L);
99
100  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
101                          bool &CarrySetting, unsigned &ProcessorIMod,
102                          StringRef &ITMask);
103  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
104                             bool &CanAcceptPredicationCode);
105
106  bool isThumb() const {
107    // FIXME: Can tablegen auto-generate this?
108    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
109  }
110  bool isThumbOne() const {
111    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
112  }
113  bool isThumbTwo() const {
114    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
115  }
116  bool hasV6Ops() const {
117    return STI.getFeatureBits() & ARM::HasV6Ops;
118  }
119  bool hasV7Ops() const {
120    return STI.getFeatureBits() & ARM::HasV7Ops;
121  }
122  void SwitchMode() {
123    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
124    setAvailableFeatures(FB);
125  }
126  bool isMClass() const {
127    return STI.getFeatureBits() & ARM::FeatureMClass;
128  }
129
130  /// @name Auto-generated Match Functions
131  /// {
132
133#define GET_ASSEMBLER_HEADER
134#include "ARMGenAsmMatcher.inc"
135
136  /// }
137
138  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
139  OperandMatchResultTy parseCoprocNumOperand(
140    SmallVectorImpl<MCParsedAsmOperand*>&);
141  OperandMatchResultTy parseCoprocRegOperand(
142    SmallVectorImpl<MCParsedAsmOperand*>&);
143  OperandMatchResultTy parseCoprocOptionOperand(
144    SmallVectorImpl<MCParsedAsmOperand*>&);
145  OperandMatchResultTy parseMemBarrierOptOperand(
146    SmallVectorImpl<MCParsedAsmOperand*>&);
147  OperandMatchResultTy parseProcIFlagsOperand(
148    SmallVectorImpl<MCParsedAsmOperand*>&);
149  OperandMatchResultTy parseMSRMaskOperand(
150    SmallVectorImpl<MCParsedAsmOperand*>&);
151  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
152                                   StringRef Op, int Low, int High);
153  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
154    return parsePKHImm(O, "lsl", 0, 31);
155  }
156  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
157    return parsePKHImm(O, "asr", 1, 32);
158  }
159  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
160  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
161  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
162  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
163  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
164  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
166  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
167  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
168
169  // Asm Match Converter Methods
170  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
171                    const SmallVectorImpl<MCParsedAsmOperand*> &);
172  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
173                    const SmallVectorImpl<MCParsedAsmOperand*> &);
174  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
175                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
176  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
177                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
178  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
179                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
180  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
181                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
182  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
183                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
184  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
185                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
186  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
187                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
188  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
189                             const SmallVectorImpl<MCParsedAsmOperand*> &);
190  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
191                             const SmallVectorImpl<MCParsedAsmOperand*> &);
192  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
193                             const SmallVectorImpl<MCParsedAsmOperand*> &);
194  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
195                             const SmallVectorImpl<MCParsedAsmOperand*> &);
196  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
197                  const SmallVectorImpl<MCParsedAsmOperand*> &);
198  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
199                  const SmallVectorImpl<MCParsedAsmOperand*> &);
200  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
201                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
202  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
203                        const SmallVectorImpl<MCParsedAsmOperand*> &);
204  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
205                     const SmallVectorImpl<MCParsedAsmOperand*> &);
206  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
207                        const SmallVectorImpl<MCParsedAsmOperand*> &);
208  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
209                     const SmallVectorImpl<MCParsedAsmOperand*> &);
210  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
211                        const SmallVectorImpl<MCParsedAsmOperand*> &);
212
213  bool validateInstruction(MCInst &Inst,
214                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
215  bool processInstruction(MCInst &Inst,
216                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
217  bool shouldOmitCCOutOperand(StringRef Mnemonic,
218                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
219
220public:
221  enum ARMMatchResultTy {
222    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
223    Match_RequiresNotITBlock,
224    Match_RequiresV6,
225    Match_RequiresThumb2
226  };
227
228  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
229    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
230    MCAsmParserExtension::Initialize(_Parser);
231
232    // Initialize the set of available features.
233    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
234
235    // Not in an ITBlock to start with.
236    ITState.CurPosition = ~0U;
237  }
238
239  // Implementation of the MCTargetAsmParser interface:
240  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
241  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
242                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
243  bool ParseDirective(AsmToken DirectiveID);
244
245  unsigned checkTargetMatchPredicate(MCInst &Inst);
246
247  bool MatchAndEmitInstruction(SMLoc IDLoc,
248                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
249                               MCStreamer &Out);
250};
251} // end anonymous namespace
252
253namespace {
254
255/// ARMOperand - Instances of this class represent a parsed ARM machine
256/// operand.
257class ARMOperand : public MCParsedAsmOperand {
258  enum KindTy {
259    k_CondCode,
260    k_CCOut,
261    k_ITCondMask,
262    k_CoprocNum,
263    k_CoprocReg,
264    k_CoprocOption,
265    k_Immediate,
266    k_FPImmediate,
267    k_MemBarrierOpt,
268    k_Memory,
269    k_PostIndexRegister,
270    k_MSRMask,
271    k_ProcIFlags,
272    k_VectorIndex,
273    k_Register,
274    k_RegisterList,
275    k_DPRRegisterList,
276    k_SPRRegisterList,
277    k_VectorList,
278    k_VectorListAllLanes,
279    k_VectorListIndexed,
280    k_ShiftedRegister,
281    k_ShiftedImmediate,
282    k_ShifterImmediate,
283    k_RotateImmediate,
284    k_BitfieldDescriptor,
285    k_Token
286  } Kind;
287
288  SMLoc StartLoc, EndLoc;
289  SmallVector<unsigned, 8> Registers;
290
291  union {
292    struct {
293      ARMCC::CondCodes Val;
294    } CC;
295
296    struct {
297      unsigned Val;
298    } Cop;
299
300    struct {
301      unsigned Val;
302    } CoprocOption;
303
304    struct {
305      unsigned Mask:4;
306    } ITMask;
307
308    struct {
309      ARM_MB::MemBOpt Val;
310    } MBOpt;
311
312    struct {
313      ARM_PROC::IFlags Val;
314    } IFlags;
315
316    struct {
317      unsigned Val;
318    } MMask;
319
320    struct {
321      const char *Data;
322      unsigned Length;
323    } Tok;
324
325    struct {
326      unsigned RegNum;
327    } Reg;
328
329    // A vector register list is a sequential list of 1 to 4 registers.
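    // For example, "{d0, d1, d2}" is stored as RegNum = D0 with Count = 3.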
330    struct {
331      unsigned RegNum;
332      unsigned Count;
333      unsigned LaneIndex;
334    } VectorList;
335
336    struct {
337      unsigned Val;
338    } VectorIndex;
339
340    struct {
341      const MCExpr *Val;
342    } Imm;
343
344    struct {
345      unsigned Val;       // encoded 8-bit representation
346    } FPImm;
347
348    /// Combined record for all forms of ARM address expressions.
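    /// e.g. "[r0, r1, lsl #2]" sets BaseRegNum = R0, OffsetRegNum = R1,
    /// ShiftType = lsl and ShiftImm = 2; "[r0, #-8]" instead fills in OffsetImm.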
349    struct {
350      unsigned BaseRegNum;
351      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
352      // was specified.
353      const MCConstantExpr *OffsetImm;  // Offset immediate value
354      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
355      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
356      unsigned ShiftImm;        // shift for OffsetReg.
357      unsigned Alignment;       // 0 = no alignment specified
358                                // n = alignment in bytes (8, 16, or 32)
359      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
360    } Memory;
361
362    struct {
363      unsigned RegNum;
364      bool isAdd;
365      ARM_AM::ShiftOpc ShiftTy;
366      unsigned ShiftImm;
367    } PostIdxReg;
368
369    struct {
370      bool isASR;
371      unsigned Imm;
372    } ShifterImm;
373    struct {
374      ARM_AM::ShiftOpc ShiftTy;
375      unsigned SrcReg;
376      unsigned ShiftReg;
377      unsigned ShiftImm;
378    } RegShiftedReg;
379    struct {
380      ARM_AM::ShiftOpc ShiftTy;
381      unsigned SrcReg;
382      unsigned ShiftImm;
383    } RegShiftedImm;
384    struct {
385      unsigned Imm;
386    } RotImm;
387    struct {
388      unsigned LSB;
389      unsigned Width;
390    } Bitfield;
391  };
392
393  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
394public:
395  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
396    Kind = o.Kind;
397    StartLoc = o.StartLoc;
398    EndLoc = o.EndLoc;
399    switch (Kind) {
400    case k_CondCode:
401      CC = o.CC;
402      break;
403    case k_ITCondMask:
404      ITMask = o.ITMask;
405      break;
406    case k_Token:
407      Tok = o.Tok;
408      break;
409    case k_CCOut:
410    case k_Register:
411      Reg = o.Reg;
412      break;
413    case k_RegisterList:
414    case k_DPRRegisterList:
415    case k_SPRRegisterList:
416      Registers = o.Registers;
417      break;
418    case k_VectorList:
419    case k_VectorListAllLanes:
420    case k_VectorListIndexed:
421      VectorList = o.VectorList;
422      break;
423    case k_CoprocNum:
424    case k_CoprocReg:
425      Cop = o.Cop;
426      break;
427    case k_CoprocOption:
428      CoprocOption = o.CoprocOption;
429      break;
430    case k_Immediate:
431      Imm = o.Imm;
432      break;
433    case k_FPImmediate:
434      FPImm = o.FPImm;
435      break;
436    case k_MemBarrierOpt:
437      MBOpt = o.MBOpt;
438      break;
439    case k_Memory:
440      Memory = o.Memory;
441      break;
442    case k_PostIndexRegister:
443      PostIdxReg = o.PostIdxReg;
444      break;
445    case k_MSRMask:
446      MMask = o.MMask;
447      break;
448    case k_ProcIFlags:
449      IFlags = o.IFlags;
450      break;
451    case k_ShifterImmediate:
452      ShifterImm = o.ShifterImm;
453      break;
454    case k_ShiftedRegister:
455      RegShiftedReg = o.RegShiftedReg;
456      break;
457    case k_ShiftedImmediate:
458      RegShiftedImm = o.RegShiftedImm;
459      break;
460    case k_RotateImmediate:
461      RotImm = o.RotImm;
462      break;
463    case k_BitfieldDescriptor:
464      Bitfield = o.Bitfield;
465      break;
466    case k_VectorIndex:
467      VectorIndex = o.VectorIndex;
468      break;
469    }
470  }
471
472  /// getStartLoc - Get the location of the first token of this operand.
473  SMLoc getStartLoc() const { return StartLoc; }
474  /// getEndLoc - Get the location of the last token of this operand.
475  SMLoc getEndLoc() const { return EndLoc; }
476
477  ARMCC::CondCodes getCondCode() const {
478    assert(Kind == k_CondCode && "Invalid access!");
479    return CC.Val;
480  }
481
482  unsigned getCoproc() const {
483    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
484    return Cop.Val;
485  }
486
487  StringRef getToken() const {
488    assert(Kind == k_Token && "Invalid access!");
489    return StringRef(Tok.Data, Tok.Length);
490  }
491
492  unsigned getReg() const {
493    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
494    return Reg.RegNum;
495  }
496
497  const SmallVectorImpl<unsigned> &getRegList() const {
498    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
499            Kind == k_SPRRegisterList) && "Invalid access!");
500    return Registers;
501  }
502
503  const MCExpr *getImm() const {
504    assert(Kind == k_Immediate && "Invalid access!");
505    return Imm.Val;
506  }
507
508  unsigned getFPImm() const {
509    assert(Kind == k_FPImmediate && "Invalid access!");
510    return FPImm.Val;
511  }
512
513  unsigned getVectorIndex() const {
514    assert(Kind == k_VectorIndex && "Invalid access!");
515    return VectorIndex.Val;
516  }
517
518  ARM_MB::MemBOpt getMemBarrierOpt() const {
519    assert(Kind == k_MemBarrierOpt && "Invalid access!");
520    return MBOpt.Val;
521  }
522
523  ARM_PROC::IFlags getProcIFlags() const {
524    assert(Kind == k_ProcIFlags && "Invalid access!");
525    return IFlags.Val;
526  }
527
528  unsigned getMSRMask() const {
529    assert(Kind == k_MSRMask && "Invalid access!");
530    return MMask.Val;
531  }
532
533  bool isCoprocNum() const { return Kind == k_CoprocNum; }
534  bool isCoprocReg() const { return Kind == k_CoprocReg; }
535  bool isCoprocOption() const { return Kind == k_CoprocOption; }
536  bool isCondCode() const { return Kind == k_CondCode; }
537  bool isCCOut() const { return Kind == k_CCOut; }
538  bool isITMask() const { return Kind == k_ITCondMask; }
539  bool isITCondCode() const { return Kind == k_CondCode; }
540  bool isImm() const { return Kind == k_Immediate; }
541  bool isFPImm() const { return Kind == k_FPImmediate; }
542  bool isImm8s4() const {
543    if (Kind != k_Immediate)
544      return false;
545    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
546    if (!CE) return false;
547    int64_t Value = CE->getValue();
548    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
549  }
550  bool isImm0_1020s4() const {
551    if (Kind != k_Immediate)
552      return false;
553    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
554    if (!CE) return false;
555    int64_t Value = CE->getValue();
556    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
557  }
558  bool isImm0_508s4() const {
559    if (Kind != k_Immediate)
560      return false;
561    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
562    if (!CE) return false;
563    int64_t Value = CE->getValue();
564    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
565  }
566  bool isImm0_255() const {
567    if (Kind != k_Immediate)
568      return false;
569    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
570    if (!CE) return false;
571    int64_t Value = CE->getValue();
572    return Value >= 0 && Value < 256;
573  }
574  bool isImm0_1() const {
575    if (Kind != k_Immediate)
576      return false;
577    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
578    if (!CE) return false;
579    int64_t Value = CE->getValue();
580    return Value >= 0 && Value < 2;
581  }
582  bool isImm0_3() const {
583    if (Kind != k_Immediate)
584      return false;
585    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
586    if (!CE) return false;
587    int64_t Value = CE->getValue();
588    return Value >= 0 && Value < 4;
589  }
590  bool isImm0_7() const {
591    if (Kind != k_Immediate)
592      return false;
593    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
594    if (!CE) return false;
595    int64_t Value = CE->getValue();
596    return Value >= 0 && Value < 8;
597  }
598  bool isImm0_15() const {
599    if (Kind != k_Immediate)
600      return false;
601    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
602    if (!CE) return false;
603    int64_t Value = CE->getValue();
604    return Value >= 0 && Value < 16;
605  }
606  bool isImm0_31() const {
607    if (Kind != k_Immediate)
608      return false;
609    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
610    if (!CE) return false;
611    int64_t Value = CE->getValue();
612    return Value >= 0 && Value < 32;
613  }
614  bool isImm0_63() const {
615    if (Kind != k_Immediate)
616      return false;
617    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
618    if (!CE) return false;
619    int64_t Value = CE->getValue();
620    return Value >= 0 && Value < 64;
621  }
622  bool isImm8() const {
623    if (Kind != k_Immediate)
624      return false;
625    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
626    if (!CE) return false;
627    int64_t Value = CE->getValue();
628    return Value == 8;
629  }
630  bool isImm16() const {
631    if (Kind != k_Immediate)
632      return false;
633    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
634    if (!CE) return false;
635    int64_t Value = CE->getValue();
636    return Value == 16;
637  }
638  bool isImm32() const {
639    if (Kind != k_Immediate)
640      return false;
641    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
642    if (!CE) return false;
643    int64_t Value = CE->getValue();
644    return Value == 32;
645  }
646  bool isImm1_7() const {
647    if (Kind != k_Immediate)
648      return false;
649    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
650    if (!CE) return false;
651    int64_t Value = CE->getValue();
652    return Value > 0 && Value < 8;
653  }
654  bool isImm1_15() const {
655    if (Kind != k_Immediate)
656      return false;
657    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
658    if (!CE) return false;
659    int64_t Value = CE->getValue();
660    return Value > 0 && Value < 16;
661  }
662  bool isImm1_31() const {
663    if (Kind != k_Immediate)
664      return false;
665    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
666    if (!CE) return false;
667    int64_t Value = CE->getValue();
668    return Value > 0 && Value < 32;
669  }
670  bool isImm1_16() const {
671    if (Kind != k_Immediate)
672      return false;
673    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
674    if (!CE) return false;
675    int64_t Value = CE->getValue();
676    return Value > 0 && Value < 17;
677  }
678  bool isImm1_32() const {
679    if (Kind != k_Immediate)
680      return false;
681    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
682    if (!CE) return false;
683    int64_t Value = CE->getValue();
684    return Value > 0 && Value < 33;
685  }
686  bool isImm0_32() const {
687    if (Kind != k_Immediate)
688      return false;
689    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
690    if (!CE) return false;
691    int64_t Value = CE->getValue();
692    return Value >= 0 && Value < 33;
693  }
694  bool isImm0_65535() const {
695    if (Kind != k_Immediate)
696      return false;
697    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
698    if (!CE) return false;
699    int64_t Value = CE->getValue();
700    return Value >= 0 && Value < 65536;
701  }
702  bool isImm0_65535Expr() const {
703    if (Kind != k_Immediate)
704      return false;
705    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
706    // If it's not a constant expression, it'll generate a fixup and be
707    // handled later.
708    if (!CE) return true;
709    int64_t Value = CE->getValue();
710    return Value >= 0 && Value < 65536;
711  }
712  bool isImm24bit() const {
713    if (Kind != k_Immediate)
714      return false;
715    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
716    if (!CE) return false;
717    int64_t Value = CE->getValue();
718    return Value >= 0 && Value <= 0xffffff;
719  }
720  bool isImmThumbSR() const {
721    if (Kind != k_Immediate)
722      return false;
723    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
724    if (!CE) return false;
725    int64_t Value = CE->getValue();
726    return Value > 0 && Value < 33;
727  }
728  bool isPKHLSLImm() const {
729    if (Kind != k_Immediate)
730      return false;
731    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
732    if (!CE) return false;
733    int64_t Value = CE->getValue();
734    return Value >= 0 && Value < 32;
735  }
736  bool isPKHASRImm() const {
737    if (Kind != k_Immediate)
738      return false;
739    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
740    if (!CE) return false;
741    int64_t Value = CE->getValue();
742    return Value > 0 && Value <= 32;
743  }
744  bool isARMSOImm() const {
745    if (Kind != k_Immediate)
746      return false;
747    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
748    if (!CE) return false;
749    int64_t Value = CE->getValue();
750    return ARM_AM::getSOImmVal(Value) != -1;
751  }
752  bool isARMSOImmNot() const {
753    if (Kind != k_Immediate)
754      return false;
755    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
756    if (!CE) return false;
757    int64_t Value = CE->getValue();
758    return ARM_AM::getSOImmVal(~Value) != -1;
759  }
760  bool isARMSOImmNeg() const {
761    if (Kind != k_Immediate)
762      return false;
763    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
764    if (!CE) return false;
765    int64_t Value = CE->getValue();
766    return ARM_AM::getSOImmVal(-Value) != -1;
767  }
768  bool isT2SOImm() const {
769    if (Kind != k_Immediate)
770      return false;
771    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
772    if (!CE) return false;
773    int64_t Value = CE->getValue();
774    return ARM_AM::getT2SOImmVal(Value) != -1;
775  }
776  bool isT2SOImmNot() const {
777    if (Kind != k_Immediate)
778      return false;
779    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
780    if (!CE) return false;
781    int64_t Value = CE->getValue();
782    return ARM_AM::getT2SOImmVal(~Value) != -1;
783  }
784  bool isT2SOImmNeg() const {
785    if (Kind != k_Immediate)
786      return false;
787    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
788    if (!CE) return false;
789    int64_t Value = CE->getValue();
790    return ARM_AM::getT2SOImmVal(-Value) != -1;
791  }
792  bool isSetEndImm() const {
793    if (Kind != k_Immediate)
794      return false;
795    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
796    if (!CE) return false;
797    int64_t Value = CE->getValue();
798    return Value == 1 || Value == 0;
799  }
800  bool isReg() const { return Kind == k_Register; }
801  bool isRegList() const { return Kind == k_RegisterList; }
802  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
803  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
804  bool isToken() const { return Kind == k_Token; }
805  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
806  bool isMemory() const { return Kind == k_Memory; }
807  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
808  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
809  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
810  bool isRotImm() const { return Kind == k_RotateImmediate; }
811  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
812  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
813  bool isPostIdxReg() const {
814    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift;
815  }
816  bool isMemNoOffset(bool alignOK = false) const {
817    if (!isMemory())
818      return false;
819    // No offset of any kind.
820    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
821     (alignOK || Memory.Alignment == 0);
822  }
823  bool isAlignedMemory() const {
824    return isMemNoOffset(true);
825  }
826  bool isAddrMode2() const {
827    if (!isMemory() || Memory.Alignment != 0) return false;
828    // Check for register offset.
829    if (Memory.OffsetRegNum) return true;
830    // Immediate offset in range [-4095, 4095].
831    if (!Memory.OffsetImm) return true;
832    int64_t Val = Memory.OffsetImm->getValue();
833    return Val > -4096 && Val < 4096;
834  }
835  bool isAM2OffsetImm() const {
836    if (Kind != k_Immediate)
837      return false;
838    // Immediate offset in range [-4095, 4095].
839    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
840    if (!CE) return false;
841    int64_t Val = CE->getValue();
842    return Val > -4096 && Val < 4096;
843  }
844  bool isAddrMode3() const {
845    if (!isMemory() || Memory.Alignment != 0) return false;
846    // No shifts are legal for AM3.
847    if (Memory.ShiftType != ARM_AM::no_shift) return false;
848    // Check for register offset.
849    if (Memory.OffsetRegNum) return true;
850    // Immediate offset in range [-255, 255].
851    if (!Memory.OffsetImm) return true;
852    int64_t Val = Memory.OffsetImm->getValue();
853    return Val > -256 && Val < 256;
854  }
855  bool isAM3Offset() const {
856    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
857      return false;
858    if (Kind == k_PostIndexRegister)
859      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
860    // Immediate offset in range [-255, 255].
861    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
862    if (!CE) return false;
863    int64_t Val = CE->getValue();
864    // Special case, #-0 is INT32_MIN.
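    // (A literal '#-0' is kept as INT32_MIN so it stays distinct from '#0'.)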
865    return (Val > -256 && Val < 256) || Val == INT32_MIN;
866  }
867  bool isAddrMode5() const {
868    // If we have an immediate that's not a constant, treat it as a label
869    // reference needing a fixup. If it is a constant, it's something else
870    // and we reject it.
871    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
872      return true;
873    if (!isMemory() || Memory.Alignment != 0) return false;
874    // Check for register offset.
875    if (Memory.OffsetRegNum) return false;
876    // Immediate offset in range [-1020, 1020] and a multiple of 4.
877    if (!Memory.OffsetImm) return true;
878    int64_t Val = Memory.OffsetImm->getValue();
879    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
880      Val == INT32_MIN;
881  }
882  bool isMemTBB() const {
883    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
884        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
885      return false;
886    return true;
887  }
888  bool isMemTBH() const {
889    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
890        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
891        Memory.Alignment != 0)
892      return false;
893    return true;
894  }
895  bool isMemRegOffset() const {
896    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
897      return false;
898    return true;
899  }
900  bool isT2MemRegOffset() const {
901    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
902        Memory.Alignment != 0)
903      return false;
904    // Only lsl #{0, 1, 2, 3} allowed.
905    if (Memory.ShiftType == ARM_AM::no_shift)
906      return true;
907    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
908      return false;
909    return true;
910  }
911  bool isMemThumbRR() const {
912    // Thumb reg+reg addressing is simple. Just two registers, a base and
913    // an offset. No shifts, negations or any other complicating factors.
914    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
915        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
916      return false;
917    return isARMLowRegister(Memory.BaseRegNum) &&
918      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
919  }
920  bool isMemThumbRIs4() const {
921    if (!isMemory() || Memory.OffsetRegNum != 0 ||
922        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
923      return false;
924    // Immediate offset, multiple of 4 in range [0, 124].
925    if (!Memory.OffsetImm) return true;
926    int64_t Val = Memory.OffsetImm->getValue();
927    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
928  }
929  bool isMemThumbRIs2() const {
930    if (!isMemory() || Memory.OffsetRegNum != 0 ||
931        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
932      return false;
933    // Immediate offset, multiple of 2 in range [0, 62].
934    if (!Memory.OffsetImm) return true;
935    int64_t Val = Memory.OffsetImm->getValue();
936    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
937  }
938  bool isMemThumbRIs1() const {
939    if (!isMemory() || Memory.OffsetRegNum != 0 ||
940        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
941      return false;
942    // Immediate offset in range [0, 31].
943    if (!Memory.OffsetImm) return true;
944    int64_t Val = Memory.OffsetImm->getValue();
945    return Val >= 0 && Val <= 31;
946  }
947  bool isMemThumbSPI() const {
948    if (!isMemory() || Memory.OffsetRegNum != 0 ||
949        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
950      return false;
951    // Immediate offset, multiple of 4 in range [0, 1020].
952    if (!Memory.OffsetImm) return true;
953    int64_t Val = Memory.OffsetImm->getValue();
954    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
955  }
956  bool isMemImm8s4Offset() const {
957    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
958      return false;
959    // Immediate offset a multiple of 4 in range [-1020, 1020].
960    if (!Memory.OffsetImm) return true;
961    int64_t Val = Memory.OffsetImm->getValue();
962    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
963  }
964  bool isMemImm0_1020s4Offset() const {
965    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
966      return false;
967    // Immediate offset a multiple of 4 in range [0, 1020].
968    if (!Memory.OffsetImm) return true;
969    int64_t Val = Memory.OffsetImm->getValue();
970    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
971  }
972  bool isMemImm8Offset() const {
973    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
974      return false;
975    // Immediate offset in range [-255, 255].
976    if (!Memory.OffsetImm) return true;
977    int64_t Val = Memory.OffsetImm->getValue();
978    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
979  }
980  bool isMemPosImm8Offset() const {
981    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
982      return false;
983    // Immediate offset in range [0, 255].
984    if (!Memory.OffsetImm) return true;
985    int64_t Val = Memory.OffsetImm->getValue();
986    return Val >= 0 && Val < 256;
987  }
988  bool isMemNegImm8Offset() const {
989    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
990      return false;
991    // Immediate offset in range [-255, -1].
992    if (!Memory.OffsetImm) return false;
993    int64_t Val = Memory.OffsetImm->getValue();
994    return (Val == INT32_MIN) || (Val > -256 && Val < 0);
995  }
996  bool isMemUImm12Offset() const {
997    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
998      return false;
999    // Immediate offset in range [0, 4095].
1000    if (!Memory.OffsetImm) return true;
1001    int64_t Val = Memory.OffsetImm->getValue();
1002    return (Val >= 0 && Val < 4096);
1003  }
1004  bool isMemImm12Offset() const {
1005    // If we have an immediate that's not a constant, treat it as a label
1006    // reference needing a fixup. If it is a constant, it's something else
1007    // and we reject it.
1008    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
1009      return true;
1010
1011    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
1012      return false;
1013    // Immediate offset in range [-4095, 4095].
1014    if (!Memory.OffsetImm) return true;
1015    int64_t Val = Memory.OffsetImm->getValue();
1016    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
1017  }
1018  bool isPostIdxImm8() const {
1019    if (Kind != k_Immediate)
1020      return false;
1021    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1022    if (!CE) return false;
1023    int64_t Val = CE->getValue();
1024    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
1025  }
1026  bool isPostIdxImm8s4() const {
1027    if (Kind != k_Immediate)
1028      return false;
1029    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1030    if (!CE) return false;
1031    int64_t Val = CE->getValue();
1032    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
1033      (Val == INT32_MIN);
1034  }
1035
1036  bool isMSRMask() const { return Kind == k_MSRMask; }
1037  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
1038
1039  // NEON operands.
1040  bool isVecListOneD() const {
1041    if (Kind != k_VectorList) return false;
1042    return VectorList.Count == 1;
1043  }
1044
1045  bool isVecListTwoD() const {
1046    if (Kind != k_VectorList) return false;
1047    return VectorList.Count == 2;
1048  }
1049
1050  bool isVecListThreeD() const {
1051    if (Kind != k_VectorList) return false;
1052    return VectorList.Count == 3;
1053  }
1054
1055  bool isVecListFourD() const {
1056    if (Kind != k_VectorList) return false;
1057    return VectorList.Count == 4;
1058  }
1059
1060  bool isVecListTwoQ() const {
1061    if (Kind != k_VectorList) return false;
1062    // FIXME: We haven't taught the parser to handle by-two register lists
1063    // yet, so don't pretend to know one.
1064    return VectorList.Count == 2 && false;
1065  }
1066
1067  bool isVecListOneDAllLanes() const {
1068    if (Kind != k_VectorListAllLanes) return false;
1069    return VectorList.Count == 1;
1070  }
1071
1072  bool isVecListTwoDAllLanes() const {
1073    if (Kind != k_VectorListAllLanes) return false;
1074    return VectorList.Count == 2;
1075  }
1076
1077  bool isVecListOneDByteIndexed() const {
1078    if (Kind != k_VectorListIndexed) return false;
1079    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1080  }
1081
1082  bool isVectorIndex8() const {
1083    if (Kind != k_VectorIndex) return false;
1084    return VectorIndex.Val < 8;
1085  }
1086  bool isVectorIndex16() const {
1087    if (Kind != k_VectorIndex) return false;
1088    return VectorIndex.Val < 4;
1089  }
1090  bool isVectorIndex32() const {
1091    if (Kind != k_VectorIndex) return false;
1092    return VectorIndex.Val < 2;
1093  }
1094
1095  bool isNEONi8splat() const {
1096    if (Kind != k_Immediate)
1097      return false;
1098    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1099    // Must be a constant.
1100    if (!CE) return false;
1101    int64_t Value = CE->getValue();
1102    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1103    // value.
1104    return Value >= 0 && Value < 256;
1105  }
1106
1107  bool isNEONi16splat() const {
1108    if (Kind != k_Immediate)
1109      return false;
1110    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1111    // Must be a constant.
1112    if (!CE) return false;
1113    int64_t Value = CE->getValue();
1114    // i16 value in the range [0,255] or [0x0100, 0xff00]
1115    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1116  }
1117
1118  bool isNEONi32splat() const {
1119    if (Kind != k_Immediate)
1120      return false;
1121    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1122    // Must be a constant.
1123    if (!CE) return false;
1124    int64_t Value = CE->getValue();
1125    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1126    return (Value >= 0 && Value < 256) ||
1127      (Value >= 0x0100 && Value <= 0xff00) ||
1128      (Value >= 0x010000 && Value <= 0xff0000) ||
1129      (Value >= 0x01000000 && Value <= 0xff000000);
1130  }
1131
1132  bool isNEONi32vmov() const {
1133    if (Kind != k_Immediate)
1134      return false;
1135    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1136    // Must be a constant.
1137    if (!CE) return false;
1138    int64_t Value = CE->getValue();
1139    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1140    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1141    return (Value >= 0 && Value < 256) ||
1142      (Value >= 0x0100 && Value <= 0xff00) ||
1143      (Value >= 0x010000 && Value <= 0xff0000) ||
1144      (Value >= 0x01000000 && Value <= 0xff000000) ||
1145      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1146      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1147  }
1148
1149  bool isNEONi64splat() const {
1150    if (Kind != k_Immediate)
1151      return false;
1152    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1153    // Must be a constant.
1154    if (!CE) return false;
1155    uint64_t Value = CE->getValue();
1156    // i64 value with each byte being either 0 or 0xff.
1157    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1158      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1159    return true;
1160  }
1161
1162  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1163    // Add as immediates when possible.  Null MCExpr = 0.
1164    if (Expr == 0)
1165      Inst.addOperand(MCOperand::CreateImm(0));
1166    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1167      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1168    else
1169      Inst.addOperand(MCOperand::CreateExpr(Expr));
1170  }
1171
1172  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1173    assert(N == 2 && "Invalid number of operands!");
1174    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1175    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1176    Inst.addOperand(MCOperand::CreateReg(RegNum));
1177  }
1178
1179  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1180    assert(N == 1 && "Invalid number of operands!");
1181    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1182  }
1183
1184  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1185    assert(N == 1 && "Invalid number of operands!");
1186    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1187  }
1188
1189  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1190    assert(N == 1 && "Invalid number of operands!");
1191    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1192  }
1193
1194  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1195    assert(N == 1 && "Invalid number of operands!");
1196    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1197  }
1198
1199  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1200    assert(N == 1 && "Invalid number of operands!");
1201    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1202  }
1203
1204  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1205    assert(N == 1 && "Invalid number of operands!");
1206    Inst.addOperand(MCOperand::CreateReg(getReg()));
1207  }
1208
1209  void addRegOperands(MCInst &Inst, unsigned N) const {
1210    assert(N == 1 && "Invalid number of operands!");
1211    Inst.addOperand(MCOperand::CreateReg(getReg()));
1212  }
1213
1214  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1215    assert(N == 3 && "Invalid number of operands!");
1216    assert(isRegShiftedReg() &&
1217           "addRegShiftedRegOperands() on non RegShiftedReg!");
1218    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1219    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1220    Inst.addOperand(MCOperand::CreateImm(
1221      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1222  }
1223
1224  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1225    assert(N == 2 && "Invalid number of operands!");
1226    assert(isRegShiftedImm() &&
1227           "addRegShiftedImmOperands() on non RegShiftedImm!");
1228    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1229    Inst.addOperand(MCOperand::CreateImm(
1230      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1231  }
1232
1233  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1234    assert(N == 1 && "Invalid number of operands!");
1235    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1236                                         ShifterImm.Imm));
1237  }
1238
1239  void addRegListOperands(MCInst &Inst, unsigned N) const {
1240    assert(N == 1 && "Invalid number of operands!");
1241    const SmallVectorImpl<unsigned> &RegList = getRegList();
1242    for (SmallVectorImpl<unsigned>::const_iterator
1243           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1244      Inst.addOperand(MCOperand::CreateReg(*I));
1245  }
1246
1247  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1248    addRegListOperands(Inst, N);
1249  }
1250
1251  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1252    addRegListOperands(Inst, N);
1253  }
1254
1255  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1256    assert(N == 1 && "Invalid number of operands!");
1257    // Encoded as val>>3. The printer handles display as 8, 16, 24.
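    // e.g. 'ror #16' is added here as the value 2.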
1258    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1259  }
1260
1261  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1262    assert(N == 1 && "Invalid number of operands!");
1263    // Munge the lsb/width into a bitfield mask.
1264    unsigned lsb = Bitfield.LSB;
1265    unsigned width = Bitfield.Width;
1266    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1267    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1268                      (32 - (lsb + width)));
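    // e.g. lsb = 8, width = 8 yields Mask = 0xffff00ff (bits [15:8] clear).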
1269    Inst.addOperand(MCOperand::CreateImm(Mask));
1270  }
1271
1272  void addImmOperands(MCInst &Inst, unsigned N) const {
1273    assert(N == 1 && "Invalid number of operands!");
1274    addExpr(Inst, getImm());
1275  }
1276
1277  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1278    assert(N == 1 && "Invalid number of operands!");
1279    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1280  }
1281
1282  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1283    assert(N == 1 && "Invalid number of operands!");
1284    // FIXME: We really want to scale the value here, but the LDRD/STRD
1285    // instructions don't encode operands that way yet.
1286    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1287    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1288  }
1289
1290  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1291    assert(N == 1 && "Invalid number of operands!");
1292    // The immediate is scaled by four in the encoding and is stored
1293    // in the MCInst as such. Lop off the low two bits here.
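    // e.g. a source operand of #1020 is stored in the MCInst as 255.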
1294    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1295    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1296  }
1297
1298  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1299    assert(N == 1 && "Invalid number of operands!");
1300    // The immediate is scaled by four in the encoding and is stored
1301    // in the MCInst as such. Lop off the low two bits here.
1302    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1303    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1304  }
1305
1306  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1307    assert(N == 1 && "Invalid number of operands!");
1308    // The constant encodes as the immediate-1, and we store in the instruction
1309    // the bits as encoded, so subtract off one here.
1310    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1311    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1312  }
1313
1314  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1315    assert(N == 1 && "Invalid number of operands!");
1316    // The constant encodes as the immediate-1, and we store in the instruction
1317    // the bits as encoded, so subtract off one here.
1318    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1319    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1320  }
1321
1322  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1323    assert(N == 1 && "Invalid number of operands!");
1324    // The constant encodes as the immediate, except for 32, which encodes as
1325    // zero.
1326    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1327    unsigned Imm = CE->getValue();
1328    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1329  }
1330
1331  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1332    assert(N == 1 && "Invalid number of operands!");
1333    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1334    // the instruction as well.
1335    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1336    int Val = CE->getValue();
1337    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1338  }
1339
1340  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1341    assert(N == 1 && "Invalid number of operands!");
1342    // The operand is actually a t2_so_imm, but we have its bitwise
1343    // negation in the assembly source, so twiddle it here.
1344    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1345    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1346  }
1347
1348  void addT2SOImmNegOperands(MCInst &Inst, unsigned N) const {
1349    assert(N == 1 && "Invalid number of operands!");
1350    // The operand is actually a t2_so_imm, but we have its
1351    // negation in the assembly source, so twiddle it here.
1352    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1353    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1354  }
1355
1356  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1357    assert(N == 1 && "Invalid number of operands!");
1358    // The operand is actually a so_imm, but we have its bitwise
1359    // negation in the assembly source, so twiddle it here.
1360    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1361    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1362  }
1363
1364  void addARMSOImmNegOperands(MCInst &Inst, unsigned N) const {
1365    assert(N == 1 && "Invalid number of operands!");
1366    // The operand is actually a so_imm, but we have its
1367    // negation in the assembly source, so twiddle it here.
1368    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1369    Inst.addOperand(MCOperand::CreateImm(-CE->getValue()));
1370  }
1371
1372  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1373    assert(N == 1 && "Invalid number of operands!");
1374    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1375  }
1376
1377  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1378    assert(N == 1 && "Invalid number of operands!");
1379    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1380  }
1381
1382  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1383    assert(N == 2 && "Invalid number of operands!");
1384    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1385    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1386  }
1387
1388  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1389    assert(N == 3 && "Invalid number of operands!");
1390    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1391    if (!Memory.OffsetRegNum) {
1392      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1393      // Special case for #-0
1394      if (Val == INT32_MIN) Val = 0;
1395      if (Val < 0) Val = -Val;
1396      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1397    } else {
1398      // For register offset, we encode the shift type and negation flag
1399      // here.
1400      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1401                              Memory.ShiftImm, Memory.ShiftType);
1402    }
1403    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1404    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1405    Inst.addOperand(MCOperand::CreateImm(Val));
1406  }
1407
1408  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1409    assert(N == 2 && "Invalid number of operands!");
1410    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1411    assert(CE && "non-constant AM2OffsetImm operand!");
1412    int32_t Val = CE->getValue();
1413    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1414    // Special case for #-0
1415    if (Val == INT32_MIN) Val = 0;
1416    if (Val < 0) Val = -Val;
1417    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1418    Inst.addOperand(MCOperand::CreateReg(0));
1419    Inst.addOperand(MCOperand::CreateImm(Val));
1420  }
1421
1422  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1423    assert(N == 3 && "Invalid number of operands!");
1424    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1425    if (!Memory.OffsetRegNum) {
1426      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1427      // Special case for #-0
1428      if (Val == INT32_MIN) Val = 0;
1429      if (Val < 0) Val = -Val;
1430      Val = ARM_AM::getAM3Opc(AddSub, Val);
1431    } else {
1432      // For register offset, we encode the shift type and negation flag
1433      // here.
1434      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1435    }
1436    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1437    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1438    Inst.addOperand(MCOperand::CreateImm(Val));
1439  }
1440
1441  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1442    assert(N == 2 && "Invalid number of operands!");
1443    if (Kind == k_PostIndexRegister) {
1444      int32_t Val =
1445        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1446      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1447      Inst.addOperand(MCOperand::CreateImm(Val));
1448      return;
1449    }
1450
1451    // Constant offset.
1452    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1453    int32_t Val = CE->getValue();
1454    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1455    // Special case for #-0
1456    if (Val == INT32_MIN) Val = 0;
1457    if (Val < 0) Val = -Val;
1458    Val = ARM_AM::getAM3Opc(AddSub, Val);
1459    Inst.addOperand(MCOperand::CreateReg(0));
1460    Inst.addOperand(MCOperand::CreateImm(Val));
1461  }
1462
1463  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1464    assert(N == 2 && "Invalid number of operands!");
1465    // If we have an immediate that's not a constant, treat it as a label
1466    // reference needing a fixup. If it is a constant, it's something else
1467    // and we reject it.
1468    if (isImm()) {
1469      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1470      Inst.addOperand(MCOperand::CreateImm(0));
1471      return;
1472    }
1473
1474    // The lower two bits are always zero and as such are not encoded.
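    // For illustration: an offset of #-1020 divides down to -255 here, and
    // getAM5Opc below records magnitude 255 together with the ARM_AM::sub flag.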
1475    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1476    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1477    // Special case for #-0
1478    if (Val == INT32_MIN) Val = 0;
1479    if (Val < 0) Val = -Val;
1480    Val = ARM_AM::getAM5Opc(AddSub, Val);
1481    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1482    Inst.addOperand(MCOperand::CreateImm(Val));
1483  }
1484
1485  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1486    assert(N == 2 && "Invalid number of operands!");
1487    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1488    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1489    Inst.addOperand(MCOperand::CreateImm(Val));
1490  }
1491
1492  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1493    assert(N == 2 && "Invalid number of operands!");
1494    // The lower two bits are always zero and as such are not encoded.
1495    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1496    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1497    Inst.addOperand(MCOperand::CreateImm(Val));
1498  }
1499
1500  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1501    assert(N == 2 && "Invalid number of operands!");
1502    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1503    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1504    Inst.addOperand(MCOperand::CreateImm(Val));
1505  }
1506
1507  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1508    addMemImm8OffsetOperands(Inst, N);
1509  }
1510
1511  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1512    addMemImm8OffsetOperands(Inst, N);
1513  }
1514
1515  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1516    assert(N == 2 && "Invalid number of operands!");
1517    // If this is an immediate, it's a label reference.
1518    if (Kind == k_Immediate) {
1519      addExpr(Inst, getImm());
1520      Inst.addOperand(MCOperand::CreateImm(0));
1521      return;
1522    }
1523
1524    // Otherwise, it's a normal memory reg+offset.
1525    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1526    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1527    Inst.addOperand(MCOperand::CreateImm(Val));
1528  }
1529
1530  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1531    assert(N == 2 && "Invalid number of operands!");
1532    // If this is an immediate, it's a label reference.
1533    if (Kind == k_Immediate) {
1534      addExpr(Inst, getImm());
1535      Inst.addOperand(MCOperand::CreateImm(0));
1536      return;
1537    }
1538
1539    // Otherwise, it's a normal memory reg+offset.
1540    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1541    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1542    Inst.addOperand(MCOperand::CreateImm(Val));
1543  }
1544
1545  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1546    assert(N == 2 && "Invalid number of operands!");
1547    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1548    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1549  }
1550
1551  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1552    assert(N == 2 && "Invalid number of operands!");
1553    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1554    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1555  }
1556
1557  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1558    assert(N == 3 && "Invalid number of operands!");
1559    unsigned Val =
1560      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1561                        Memory.ShiftImm, Memory.ShiftType);
1562    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1563    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1564    Inst.addOperand(MCOperand::CreateImm(Val));
1565  }
1566
1567  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1568    assert(N == 3 && "Invalid number of operands!");
1569    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1570    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1571    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1572  }
1573
1574  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1575    assert(N == 2 && "Invalid number of operands!");
1576    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1577    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1578  }
1579
1580  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1581    assert(N == 2 && "Invalid number of operands!");
1582    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1583    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1584    Inst.addOperand(MCOperand::CreateImm(Val));
1585  }
1586
1587  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1588    assert(N == 2 && "Invalid number of operands!");
1589    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1590    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1591    Inst.addOperand(MCOperand::CreateImm(Val));
1592  }
1593
1594  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1595    assert(N == 2 && "Invalid number of operands!");
1596    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1597    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1598    Inst.addOperand(MCOperand::CreateImm(Val));
1599  }
1600
1601  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1602    assert(N == 2 && "Invalid number of operands!");
1603    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1604    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1605    Inst.addOperand(MCOperand::CreateImm(Val));
1606  }
1607
1608  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1609    assert(N == 1 && "Invalid number of operands!");
1610    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1611    assert(CE && "non-constant post-idx-imm8 operand!");
1612    int Imm = CE->getValue();
1613    bool isAdd = Imm >= 0;
1614    if (Imm == INT32_MIN) Imm = 0;
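    // The add/sub flag is carried in bit 8 above the 8-bit magnitude,
    // so e.g. #4 packs to 0x104 and #-4 packs to 0x004.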
1615    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1616    Inst.addOperand(MCOperand::CreateImm(Imm));
1617  }
1618
1619  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1620    assert(N == 1 && "Invalid number of operands!");
1621    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1622    assert(CE && "non-constant post-idx-imm8s4 operand!");
1623    int Imm = CE->getValue();
1624    bool isAdd = Imm >= 0;
1625    if (Imm == INT32_MIN) Imm = 0;
1626    // Immediate is scaled by 4.
1627    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1628    Inst.addOperand(MCOperand::CreateImm(Imm));
1629  }
1630
1631  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1632    assert(N == 2 && "Invalid number of operands!");
1633    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1634    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1635  }
1636
1637  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1638    assert(N == 2 && "Invalid number of operands!");
1639    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1640    // The sign, shift type, and shift amount are encoded in a single operand
1641    // using the AM2 encoding helpers.
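    // For illustration: a post-index of "-r2, lsr #3" arrives with
    // isAdd == false, ShiftTy == lsr and ShiftImm == 3, which packs as
    // getAM2Opc(sub, 3, lsr).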
1642    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1643    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1644                                     PostIdxReg.ShiftTy);
1645    Inst.addOperand(MCOperand::CreateImm(Imm));
1646  }
1647
1648  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1649    assert(N == 1 && "Invalid number of operands!");
1650    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1651  }
1652
1653  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1654    assert(N == 1 && "Invalid number of operands!");
1655    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1656  }
1657
1658  void addVecListOperands(MCInst &Inst, unsigned N) const {
1659    assert(N == 1 && "Invalid number of operands!");
1660    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1661  }
1662
1663  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1664    assert(N == 2 && "Invalid number of operands!");
1665    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1666    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1667  }
1668
1669  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1670    assert(N == 1 && "Invalid number of operands!");
1671    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1672  }
1673
1674  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1675    assert(N == 1 && "Invalid number of operands!");
1676    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1677  }
1678
1679  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1680    assert(N == 1 && "Invalid number of operands!");
1681    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1682  }
1683
1684  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1685    assert(N == 1 && "Invalid number of operands!");
1686    // The immediate encodes the type of constant as well as the value.
1687    // Mask in that this is an i8 splat.
1688    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1689    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1690  }
1691
1692  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1693    assert(N == 1 && "Invalid number of operands!");
1694    // The immediate encodes the type of constant as well as the value.
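    // For illustration: 0x00ab fits in the low byte and packs to
    // 0xab | 0x800, while 0xab00 is shifted down and packs to 0xab | 0xa00.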
1695    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1696    unsigned Value = CE->getValue();
1697    if (Value >= 256)
1698      Value = (Value >> 8) | 0xa00;
1699    else
1700      Value |= 0x800;
1701    Inst.addOperand(MCOperand::CreateImm(Value));
1702  }
1703
1704  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1705    assert(N == 1 && "Invalid number of operands!");
1706    // The immediate encodes the type of constant as well as the value.
1707    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1708    unsigned Value = CE->getValue();
1709    if (Value >= 256 && Value <= 0xff00)
1710      Value = (Value >> 8) | 0x200;
1711    else if (Value > 0xffff && Value <= 0xff0000)
1712      Value = (Value >> 16) | 0x400;
1713    else if (Value > 0xffffff)
1714      Value = (Value >> 24) | 0x600;
1715    Inst.addOperand(MCOperand::CreateImm(Value));
1716  }
1717
1718  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1719    assert(N == 1 && "Invalid number of operands!");
1720    // The immediate encodes the type of constant as well as the value.
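    // For illustration: 0x0000ab00 packs to (0xab00 >> 8) | 0x200 = 0x2ab,
    // whereas 0x0000abff (low byte all ones) takes the 0xc00 form, 0xcab.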
1721    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1722    unsigned Value = CE->getValue();
1723    if (Value >= 256 && Value <= 0xffff)
1724      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1725    else if (Value > 0xffff && Value <= 0xffffff)
1726      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1727    else if (Value > 0xffffff)
1728      Value = (Value >> 24) | 0x600;
1729    Inst.addOperand(MCOperand::CreateImm(Value));
1730  }
1731
1732  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1733    assert(N == 1 && "Invalid number of operands!");
1734    // The immediate encodes the type of constant as well as the value.
1735    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1736    uint64_t Value = CE->getValue();
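    // Each byte of a valid i64 splat constant is expected to be 0x00 or 0xff,
    // so collecting bit 0 of every byte below recovers the 8-bit byte mask;
    // e.g. 0x00ff00ff00ff00ff yields 0x55 before the 0x1e00 type bits are
    // OR'd in.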
1737    unsigned Imm = 0;
1738    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1739      Imm |= (Value & 1) << i;
1740    }
1741    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1742  }
1743
1744  virtual void print(raw_ostream &OS) const;
1745
1746  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1747    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1748    Op->ITMask.Mask = Mask;
1749    Op->StartLoc = S;
1750    Op->EndLoc = S;
1751    return Op;
1752  }
1753
1754  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1755    ARMOperand *Op = new ARMOperand(k_CondCode);
1756    Op->CC.Val = CC;
1757    Op->StartLoc = S;
1758    Op->EndLoc = S;
1759    return Op;
1760  }
1761
1762  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1763    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1764    Op->Cop.Val = CopVal;
1765    Op->StartLoc = S;
1766    Op->EndLoc = S;
1767    return Op;
1768  }
1769
1770  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1771    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1772    Op->Cop.Val = CopVal;
1773    Op->StartLoc = S;
1774    Op->EndLoc = S;
1775    return Op;
1776  }
1777
1778  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1779    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1780    Op->Cop.Val = Val;
1781    Op->StartLoc = S;
1782    Op->EndLoc = E;
1783    return Op;
1784  }
1785
1786  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1787    ARMOperand *Op = new ARMOperand(k_CCOut);
1788    Op->Reg.RegNum = RegNum;
1789    Op->StartLoc = S;
1790    Op->EndLoc = S;
1791    return Op;
1792  }
1793
1794  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1795    ARMOperand *Op = new ARMOperand(k_Token);
1796    Op->Tok.Data = Str.data();
1797    Op->Tok.Length = Str.size();
1798    Op->StartLoc = S;
1799    Op->EndLoc = S;
1800    return Op;
1801  }
1802
1803  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1804    ARMOperand *Op = new ARMOperand(k_Register);
1805    Op->Reg.RegNum = RegNum;
1806    Op->StartLoc = S;
1807    Op->EndLoc = E;
1808    return Op;
1809  }
1810
1811  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1812                                           unsigned SrcReg,
1813                                           unsigned ShiftReg,
1814                                           unsigned ShiftImm,
1815                                           SMLoc S, SMLoc E) {
1816    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1817    Op->RegShiftedReg.ShiftTy = ShTy;
1818    Op->RegShiftedReg.SrcReg = SrcReg;
1819    Op->RegShiftedReg.ShiftReg = ShiftReg;
1820    Op->RegShiftedReg.ShiftImm = ShiftImm;
1821    Op->StartLoc = S;
1822    Op->EndLoc = E;
1823    return Op;
1824  }
1825
1826  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1827                                            unsigned SrcReg,
1828                                            unsigned ShiftImm,
1829                                            SMLoc S, SMLoc E) {
1830    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1831    Op->RegShiftedImm.ShiftTy = ShTy;
1832    Op->RegShiftedImm.SrcReg = SrcReg;
1833    Op->RegShiftedImm.ShiftImm = ShiftImm;
1834    Op->StartLoc = S;
1835    Op->EndLoc = E;
1836    return Op;
1837  }
1838
1839  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1840                                   SMLoc S, SMLoc E) {
1841    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1842    Op->ShifterImm.isASR = isASR;
1843    Op->ShifterImm.Imm = Imm;
1844    Op->StartLoc = S;
1845    Op->EndLoc = E;
1846    return Op;
1847  }
1848
1849  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1850    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1851    Op->RotImm.Imm = Imm;
1852    Op->StartLoc = S;
1853    Op->EndLoc = E;
1854    return Op;
1855  }
1856
1857  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1858                                    SMLoc S, SMLoc E) {
1859    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1860    Op->Bitfield.LSB = LSB;
1861    Op->Bitfield.Width = Width;
1862    Op->StartLoc = S;
1863    Op->EndLoc = E;
1864    return Op;
1865  }
1866
1867  static ARMOperand *
1868  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1869                SMLoc StartLoc, SMLoc EndLoc) {
1870    KindTy Kind = k_RegisterList;
1871
1872    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1873      Kind = k_DPRRegisterList;
1874    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
1875             contains(Regs.front().first))
1876      Kind = k_SPRRegisterList;
1877
1878    ARMOperand *Op = new ARMOperand(Kind);
1879    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
1880           I = Regs.begin(), E = Regs.end(); I != E; ++I)
1881      Op->Registers.push_back(I->first);
1882    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
1883    Op->StartLoc = StartLoc;
1884    Op->EndLoc = EndLoc;
1885    return Op;
1886  }
1887
1888  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
1889                                      SMLoc S, SMLoc E) {
1890    ARMOperand *Op = new ARMOperand(k_VectorList);
1891    Op->VectorList.RegNum = RegNum;
1892    Op->VectorList.Count = Count;
1893    Op->StartLoc = S;
1894    Op->EndLoc = E;
1895    return Op;
1896  }
1897
1898  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
1899                                              SMLoc S, SMLoc E) {
1900    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
1901    Op->VectorList.RegNum = RegNum;
1902    Op->VectorList.Count = Count;
1903    Op->StartLoc = S;
1904    Op->EndLoc = E;
1905    return Op;
1906  }
1907
1908  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
1909                                             unsigned Index, SMLoc S, SMLoc E) {
1910    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
1911    Op->VectorList.RegNum = RegNum;
1912    Op->VectorList.Count = Count;
1913    Op->VectorList.LaneIndex = Index;
1914    Op->StartLoc = S;
1915    Op->EndLoc = E;
1916    return Op;
1917  }
1918
1919  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1920                                       MCContext &Ctx) {
1921    ARMOperand *Op = new ARMOperand(k_VectorIndex);
1922    Op->VectorIndex.Val = Idx;
1923    Op->StartLoc = S;
1924    Op->EndLoc = E;
1925    return Op;
1926  }
1927
1928  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
1929    ARMOperand *Op = new ARMOperand(k_Immediate);
1930    Op->Imm.Val = Val;
1931    Op->StartLoc = S;
1932    Op->EndLoc = E;
1933    return Op;
1934  }
1935
1936  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1937    ARMOperand *Op = new ARMOperand(k_FPImmediate);
1938    Op->FPImm.Val = Val;
1939    Op->StartLoc = S;
1940    Op->EndLoc = S;
1941    return Op;
1942  }
1943
1944  static ARMOperand *CreateMem(unsigned BaseRegNum,
1945                               const MCConstantExpr *OffsetImm,
1946                               unsigned OffsetRegNum,
1947                               ARM_AM::ShiftOpc ShiftType,
1948                               unsigned ShiftImm,
1949                               unsigned Alignment,
1950                               bool isNegative,
1951                               SMLoc S, SMLoc E) {
1952    ARMOperand *Op = new ARMOperand(k_Memory);
1953    Op->Memory.BaseRegNum = BaseRegNum;
1954    Op->Memory.OffsetImm = OffsetImm;
1955    Op->Memory.OffsetRegNum = OffsetRegNum;
1956    Op->Memory.ShiftType = ShiftType;
1957    Op->Memory.ShiftImm = ShiftImm;
1958    Op->Memory.Alignment = Alignment;
1959    Op->Memory.isNegative = isNegative;
1960    Op->StartLoc = S;
1961    Op->EndLoc = E;
1962    return Op;
1963  }
1964
1965  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
1966                                      ARM_AM::ShiftOpc ShiftTy,
1967                                      unsigned ShiftImm,
1968                                      SMLoc S, SMLoc E) {
1969    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
1970    Op->PostIdxReg.RegNum = RegNum;
1971    Op->PostIdxReg.isAdd = isAdd;
1972    Op->PostIdxReg.ShiftTy = ShiftTy;
1973    Op->PostIdxReg.ShiftImm = ShiftImm;
1974    Op->StartLoc = S;
1975    Op->EndLoc = E;
1976    return Op;
1977  }
1978
1979  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
1980    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
1981    Op->MBOpt.Val = Opt;
1982    Op->StartLoc = S;
1983    Op->EndLoc = S;
1984    return Op;
1985  }
1986
1987  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
1988    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
1989    Op->IFlags.Val = IFlags;
1990    Op->StartLoc = S;
1991    Op->EndLoc = S;
1992    return Op;
1993  }
1994
1995  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
1996    ARMOperand *Op = new ARMOperand(k_MSRMask);
1997    Op->MMask.Val = MMask;
1998    Op->StartLoc = S;
1999    Op->EndLoc = S;
2000    return Op;
2001  }
2002};
2003
2004} // end anonymous namespace.
2005
2006void ARMOperand::print(raw_ostream &OS) const {
2007  switch (Kind) {
2008  case k_FPImmediate:
2009    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
2010       << ") >";
2011    break;
2012  case k_CondCode:
2013    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
2014    break;
2015  case k_CCOut:
2016    OS << "<ccout " << getReg() << ">";
2017    break;
2018  case k_ITCondMask: {
2019    static const char *MaskStr[] = {
2020      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
2021      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
2022    };
2023    assert((ITMask.Mask & 0xf) == ITMask.Mask);
2024    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
2025    break;
2026  }
2027  case k_CoprocNum:
2028    OS << "<coprocessor number: " << getCoproc() << ">";
2029    break;
2030  case k_CoprocReg:
2031    OS << "<coprocessor register: " << getCoproc() << ">";
2032    break;
2033  case k_CoprocOption:
2034    OS << "<coprocessor option: " << CoprocOption.Val << ">";
2035    break;
2036  case k_MSRMask:
2037    OS << "<mask: " << getMSRMask() << ">";
2038    break;
2039  case k_Immediate:
2040    getImm()->print(OS);
2041    break;
2042  case k_MemBarrierOpt:
2043    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
2044    break;
2045  case k_Memory:
2046    OS << "<memory "
2047       << "base:" << Memory.BaseRegNum;
2048    OS << ">";
2049    break;
2050  case k_PostIndexRegister:
2051    OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
2052       << PostIdxReg.RegNum;
2053    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
2054      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
2055         << PostIdxReg.ShiftImm;
2056    OS << ">";
2057    break;
2058  case k_ProcIFlags: {
2059    OS << "<ARM_PROC::";
2060    unsigned IFlags = getProcIFlags();
2061    for (int i=2; i >= 0; --i)
2062      if (IFlags & (1 << i))
2063        OS << ARM_PROC::IFlagsToString(1 << i);
2064    OS << ">";
2065    break;
2066  }
2067  case k_Register:
2068    OS << "<register " << getReg() << ">";
2069    break;
2070  case k_ShifterImmediate:
2071    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
2072       << " #" << ShifterImm.Imm << ">";
2073    break;
2074  case k_ShiftedRegister:
2075    OS << "<so_reg_reg "
2076       << RegShiftedReg.SrcReg << " "
2077       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
2078       << " " << RegShiftedReg.ShiftReg << ">";
2079    break;
2080  case k_ShiftedImmediate:
2081    OS << "<so_reg_imm "
2082       << RegShiftedImm.SrcReg << " "
2083       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
2084       << " #" << RegShiftedImm.ShiftImm << ">";
2085    break;
2086  case k_RotateImmediate:
2087    OS << "<ror #" << (RotImm.Imm * 8) << ">";
2088    break;
2089  case k_BitfieldDescriptor:
2090    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2091       << ", width: " << Bitfield.Width << ">";
2092    break;
2093  case k_RegisterList:
2094  case k_DPRRegisterList:
2095  case k_SPRRegisterList: {
2096    OS << "<register_list ";
2097
2098    const SmallVectorImpl<unsigned> &RegList = getRegList();
2099    for (SmallVectorImpl<unsigned>::const_iterator
2100           I = RegList.begin(), E = RegList.end(); I != E; ) {
2101      OS << *I;
2102      if (++I < E) OS << ", ";
2103    }
2104
2105    OS << ">";
2106    break;
2107  }
2108  case k_VectorList:
2109    OS << "<vector_list " << VectorList.Count << " * "
2110       << VectorList.RegNum << ">";
2111    break;
2112  case k_VectorListAllLanes:
2113    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2114       << VectorList.RegNum << ">";
2115    break;
2116  case k_VectorListIndexed:
2117    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2118       << VectorList.Count << " * " << VectorList.RegNum << ">";
2119    break;
2120  case k_Token:
2121    OS << "'" << getToken() << "'";
2122    break;
2123  case k_VectorIndex:
2124    OS << "<vectorindex " << getVectorIndex() << ">";
2125    break;
2126  }
2127}
2128
2129/// @name Auto-generated Match Functions
2130/// {
2131
2132static unsigned MatchRegisterName(StringRef Name);
2133
2134/// }
2135
2136bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2137                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2138  RegNo = tryParseRegister();
2139
2140  return (RegNo == (unsigned)-1);
2141}
2142
2143/// Try to parse a register name.  The token must be an Identifier when called,
2144/// and if it is a register name the token is eaten and the register number is
2145/// returned.  Otherwise return -1.
2146///
2147int ARMAsmParser::tryParseRegister() {
2148  const AsmToken &Tok = Parser.getTok();
2149  if (Tok.isNot(AsmToken::Identifier)) return -1;
2150
2151  std::string lowerCase = Tok.getString().lower();
2152  unsigned RegNum = MatchRegisterName(lowerCase);
2153  if (!RegNum) {
2154    RegNum = StringSwitch<unsigned>(lowerCase)
2155      .Case("r13", ARM::SP)
2156      .Case("r14", ARM::LR)
2157      .Case("r15", ARM::PC)
2158      .Case("ip", ARM::R12)
2159      // Additional register name aliases for 'gas' compatibility.
2160      .Case("a1", ARM::R0)
2161      .Case("a2", ARM::R1)
2162      .Case("a3", ARM::R2)
2163      .Case("a4", ARM::R3)
2164      .Case("v1", ARM::R4)
2165      .Case("v2", ARM::R5)
2166      .Case("v3", ARM::R6)
2167      .Case("v4", ARM::R7)
2168      .Case("v5", ARM::R8)
2169      .Case("v6", ARM::R9)
2170      .Case("v7", ARM::R10)
2171      .Case("v8", ARM::R11)
2172      .Case("sb", ARM::R9)
2173      .Case("sl", ARM::R10)
2174      .Case("fp", ARM::R11)
2175      .Default(0);
2176  }
2177  if (!RegNum) return -1;
2178
2179  Parser.Lex(); // Eat identifier token.
2180
2181  return RegNum;
2182}
2183
2184// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2185// If a recoverable error occurs, return 1. If an irrecoverable error
2186// occurs, return -1. An irrecoverable error is one where tokens have been
2187// consumed in the process of trying to parse the shifter (i.e., when it is
2188// indeed a shifter operand, but malformed).
2189int ARMAsmParser::tryParseShiftRegister(
2190                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2191  SMLoc S = Parser.getTok().getLoc();
2192  const AsmToken &Tok = Parser.getTok();
2193  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2194
2195  std::string lowerCase = Tok.getString().lower();
2196  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2197      .Case("asl", ARM_AM::lsl)
2198      .Case("lsl", ARM_AM::lsl)
2199      .Case("lsr", ARM_AM::lsr)
2200      .Case("asr", ARM_AM::asr)
2201      .Case("ror", ARM_AM::ror)
2202      .Case("rrx", ARM_AM::rrx)
2203      .Default(ARM_AM::no_shift);
2204
2205  if (ShiftTy == ARM_AM::no_shift)
2206    return 1;
2207
2208  Parser.Lex(); // Eat the operator.
2209
2210  // The source register for the shift has already been added to the
2211  // operand list, so we need to pop it off and combine it into the shifted
2212  // register operand instead.
2213  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2214  if (!PrevOp->isReg())
2215    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2216  int SrcReg = PrevOp->getReg();
2217  int64_t Imm = 0;
2218  int ShiftReg = 0;
2219  if (ShiftTy == ARM_AM::rrx) {
2220    // RRX doesn't have an explicit shift amount. The encoder expects
2221    // the shift register to be the same as the source register. Seems odd,
2222    // but OK.
2223    ShiftReg = SrcReg;
2224  } else {
2225    // Figure out if this is shifted by a constant or a register (for non-RRX).
2226    if (Parser.getTok().is(AsmToken::Hash)) {
2227      Parser.Lex(); // Eat hash.
2228      SMLoc ImmLoc = Parser.getTok().getLoc();
2229      const MCExpr *ShiftExpr = 0;
2230      if (getParser().ParseExpression(ShiftExpr)) {
2231        Error(ImmLoc, "invalid immediate shift value");
2232        return -1;
2233      }
2234      // The expression must be evaluatable as an immediate.
2235      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2236      if (!CE) {
2237        Error(ImmLoc, "invalid immediate shift value");
2238        return -1;
2239      }
2240      // Range check the immediate.
2241      // lsl, ror: 0 <= imm <= 31
2242      // lsr, asr: 0 <= imm <= 32
2243      Imm = CE->getValue();
2244      if (Imm < 0 ||
2245          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2246          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2247        Error(ImmLoc, "immediate shift value out of range");
2248        return -1;
2249      }
2250    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2251      ShiftReg = tryParseRegister();
2252      SMLoc L = Parser.getTok().getLoc();
2253      if (ShiftReg == -1) {
2254        Error(L, "expected immediate or register in shift operand");
2255        return -1;
2256      }
2257    } else {
2258      Error(Parser.getTok().getLoc(),
2259            "expected immediate or register in shift operand");
2260      return -1;
2261    }
2262  }
2263
2264  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2265    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2266                                                         ShiftReg, Imm,
2267                                               S, Parser.getTok().getLoc()));
2268  else
2269    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2270                                               S, Parser.getTok().getLoc()));
2271
2272  return 0;
2273}
2274
2275
2276/// Try to parse a register name.  The token must be an Identifier when called.
2277/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2278/// if there is a "writeback". Returns 'true' if it's not a register.
2279///
2280/// TODO: this is likely to change to allow different register types and/or to
2281/// parse for a specific register type.
2282bool ARMAsmParser::
2283tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2284  SMLoc S = Parser.getTok().getLoc();
2285  int RegNo = tryParseRegister();
2286  if (RegNo == -1)
2287    return true;
2288
2289  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2290
2291  const AsmToken &ExclaimTok = Parser.getTok();
2292  if (ExclaimTok.is(AsmToken::Exclaim)) {
2293    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2294                                               ExclaimTok.getLoc()));
2295    Parser.Lex(); // Eat exclaim token
2296    return false;
2297  }
2298
2299  // Also check for an index operand. This is only legal for vector registers,
2300  // but that'll get caught OK in operand matching, so we don't need to
2301  // explicitly filter everything else out here.
2302  if (Parser.getTok().is(AsmToken::LBrac)) {
2303    SMLoc SIdx = Parser.getTok().getLoc();
2304    Parser.Lex(); // Eat left bracket token.
2305
2306    const MCExpr *ImmVal;
2307    if (getParser().ParseExpression(ImmVal))
2308      return true;
2309    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2310    if (!MCE) {
2311      TokError("immediate value expected for vector index");
2312      return true;
2313    }
2314
2315    SMLoc E = Parser.getTok().getLoc();
2316    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2317      Error(E, "']' expected");
2318      return true;
2319    }
2320
2321    Parser.Lex(); // Eat right bracket token.
2322
2323    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2324                                                     SIdx, E,
2325                                                     getContext()));
2326  }
2327
2328  return false;
2329}
2330
2331/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2332/// instruction's symbolic operand name. Example: "p1", "p7", "c3",
2333/// "c5", ...
2334static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2335  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2336  // but efficient.
2337  switch (Name.size()) {
2338  default: break;
2339  case 2:
2340    if (Name[0] != CoprocOp)
2341      return -1;
2342    switch (Name[1]) {
2343    default:  return -1;
2344    case '0': return 0;
2345    case '1': return 1;
2346    case '2': return 2;
2347    case '3': return 3;
2348    case '4': return 4;
2349    case '5': return 5;
2350    case '6': return 6;
2351    case '7': return 7;
2352    case '8': return 8;
2353    case '9': return 9;
2354    }
2355    break;
2356  case 3:
2357    if (Name[0] != CoprocOp || Name[1] != '1')
2358      return -1;
2359    switch (Name[2]) {
2360    default:  return -1;
2361    case '0': return 10;
2362    case '1': return 11;
2363    case '2': return 12;
2364    case '3': return 13;
2365    case '4': return 14;
2366    case '5': return 15;
2367    }
2368    break;
2369  }
2370
2371  return -1;
2372}
2373
2374/// parseITCondCode - Try to parse a condition code for an IT instruction.
2375ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2376parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2377  SMLoc S = Parser.getTok().getLoc();
2378  const AsmToken &Tok = Parser.getTok();
2379  if (!Tok.is(AsmToken::Identifier))
2380    return MatchOperand_NoMatch;
2381  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2382    .Case("eq", ARMCC::EQ)
2383    .Case("ne", ARMCC::NE)
2384    .Case("hs", ARMCC::HS)
2385    .Case("cs", ARMCC::HS)
2386    .Case("lo", ARMCC::LO)
2387    .Case("cc", ARMCC::LO)
2388    .Case("mi", ARMCC::MI)
2389    .Case("pl", ARMCC::PL)
2390    .Case("vs", ARMCC::VS)
2391    .Case("vc", ARMCC::VC)
2392    .Case("hi", ARMCC::HI)
2393    .Case("ls", ARMCC::LS)
2394    .Case("ge", ARMCC::GE)
2395    .Case("lt", ARMCC::LT)
2396    .Case("gt", ARMCC::GT)
2397    .Case("le", ARMCC::LE)
2398    .Case("al", ARMCC::AL)
2399    .Default(~0U);
2400  if (CC == ~0U)
2401    return MatchOperand_NoMatch;
2402  Parser.Lex(); // Eat the token.
2403
2404  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2405
2406  return MatchOperand_Success;
2407}
2408
2409/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2410/// token must be an Identifier when called, and if it is a coprocessor
2411/// number, the token is eaten and the operand is added to the operand list.
2412ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2413parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2414  SMLoc S = Parser.getTok().getLoc();
2415  const AsmToken &Tok = Parser.getTok();
2416  if (Tok.isNot(AsmToken::Identifier))
2417    return MatchOperand_NoMatch;
2418
2419  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2420  if (Num == -1)
2421    return MatchOperand_NoMatch;
2422
2423  Parser.Lex(); // Eat identifier token.
2424  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2425  return MatchOperand_Success;
2426}
2427
2428/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2429/// token must be an Identifier when called, and if it is a coprocessor
2430/// register, the token is eaten and the operand is added to the operand list.
2431ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2432parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2433  SMLoc S = Parser.getTok().getLoc();
2434  const AsmToken &Tok = Parser.getTok();
2435  if (Tok.isNot(AsmToken::Identifier))
2436    return MatchOperand_NoMatch;
2437
2438  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2439  if (Reg == -1)
2440    return MatchOperand_NoMatch;
2441
2442  Parser.Lex(); // Eat identifier token.
2443  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2444  return MatchOperand_Success;
2445}
2446
2447/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2448/// coproc_option : '{' imm0_255 '}'
2449ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2450parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2451  SMLoc S = Parser.getTok().getLoc();
2452
2453  // If this isn't a '{', this isn't a coprocessor immediate operand.
2454  if (Parser.getTok().isNot(AsmToken::LCurly))
2455    return MatchOperand_NoMatch;
2456  Parser.Lex(); // Eat the '{'
2457
2458  const MCExpr *Expr;
2459  SMLoc Loc = Parser.getTok().getLoc();
2460  if (getParser().ParseExpression(Expr)) {
2461    Error(Loc, "illegal expression");
2462    return MatchOperand_ParseFail;
2463  }
2464  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2465  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2466    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2467    return MatchOperand_ParseFail;
2468  }
2469  int Val = CE->getValue();
2470
2471  // Check for and consume the closing '}'
2472  if (Parser.getTok().isNot(AsmToken::RCurly))
2473    return MatchOperand_ParseFail;
2474  SMLoc E = Parser.getTok().getLoc();
2475  Parser.Lex(); // Eat the '}'
2476
2477  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2478  return MatchOperand_Success;
2479}
2480
2481// For register list parsing, we need to map from raw GPR register numbering
2482// to the enumeration values. The enumeration values aren't sorted by
2483// register number due to our using "sp", "lr" and "pc" as canonical names.
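// For example, getNextRegister(ARM::R12) must return ARM::SP even though the
// two enumerators are not adjacent.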
2484static unsigned getNextRegister(unsigned Reg) {
2485  // If this is a GPR, we need to do it manually, otherwise we can rely
2486  // on the sort ordering of the enumeration since the other reg-classes
2487  // are sane.
2488  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2489    return Reg + 1;
2490  switch(Reg) {
2491  default: llvm_unreachable("Invalid GPR number!");
2492  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2493  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2494  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2495  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2496  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2497  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2498  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2499  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2500  }
2501}
2502
2503// Return the low-subreg of a given Q register.
2504static unsigned getDRegFromQReg(unsigned QReg) {
2505  switch (QReg) {
2506  default: llvm_unreachable("expected a Q register!");
2507  case ARM::Q0:  return ARM::D0;
2508  case ARM::Q1:  return ARM::D2;
2509  case ARM::Q2:  return ARM::D4;
2510  case ARM::Q3:  return ARM::D6;
2511  case ARM::Q4:  return ARM::D8;
2512  case ARM::Q5:  return ARM::D10;
2513  case ARM::Q6:  return ARM::D12;
2514  case ARM::Q7:  return ARM::D14;
2515  case ARM::Q8:  return ARM::D16;
2516  case ARM::Q9:  return ARM::D18;
2517  case ARM::Q10: return ARM::D20;
2518  case ARM::Q11: return ARM::D22;
2519  case ARM::Q12: return ARM::D24;
2520  case ARM::Q13: return ARM::D26;
2521  case ARM::Q14: return ARM::D28;
2522  case ARM::Q15: return ARM::D30;
2523  }
2524}
2525
2526/// Parse a register list.
2527bool ARMAsmParser::
2528parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2529  assert(Parser.getTok().is(AsmToken::LCurly) &&
2530         "Token is not a Left Curly Brace");
2531  SMLoc S = Parser.getTok().getLoc();
2532  Parser.Lex(); // Eat '{' token.
2533  SMLoc RegLoc = Parser.getTok().getLoc();
2534
2535  // Check the first register in the list to see what register class
2536  // this is a list of.
2537  int Reg = tryParseRegister();
2538  if (Reg == -1)
2539    return Error(RegLoc, "register expected");
2540
2541  // The reglist instructions have at most 16 registers, so reserve
2542  // space for that many.
2543  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2544
2545  // Allow Q regs and just interpret them as the two D sub-registers.
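  // For example, a list written as {q0, q1} is accepted here and recorded as
  // {d0, d1, d2, d3}.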
2546  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2547    Reg = getDRegFromQReg(Reg);
2548    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2549    ++Reg;
2550  }
2551  const MCRegisterClass *RC;
2552  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2553    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2554  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2555    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2556  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2557    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2558  else
2559    return Error(RegLoc, "invalid register in register list");
2560
2561  // Store the register.
2562  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2563
2564  // This starts immediately after the first register token in the list,
2565  // so we can see either a comma or a minus (range separator) as a legal
2566  // next token.
2567  while (Parser.getTok().is(AsmToken::Comma) ||
2568         Parser.getTok().is(AsmToken::Minus)) {
2569    if (Parser.getTok().is(AsmToken::Minus)) {
2570      Parser.Lex(); // Eat the minus.
2571      SMLoc EndLoc = Parser.getTok().getLoc();
2572      int EndReg = tryParseRegister();
2573      if (EndReg == -1)
2574        return Error(EndLoc, "register expected");
2575      // Allow Q regs and just interpret them as the two D sub-registers.
2576      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2577        EndReg = getDRegFromQReg(EndReg) + 1;
2578      // If the register is the same as the start reg, there's nothing
2579      // more to do.
2580      if (Reg == EndReg)
2581        continue;
2582      // The register must be in the same register class as the first.
2583      if (!RC->contains(EndReg))
2584        return Error(EndLoc, "invalid register in register list");
2585      // Ranges must go from low to high.
2586      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2587        return Error(EndLoc, "bad range in register list");
2588
2589      // Add all the registers in the range to the register list.
2590      while (Reg != EndReg) {
2591        Reg = getNextRegister(Reg);
2592        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2593      }
2594      continue;
2595    }
2596    Parser.Lex(); // Eat the comma.
2597    RegLoc = Parser.getTok().getLoc();
2598    int OldReg = Reg;
2599    Reg = tryParseRegister();
2600    if (Reg == -1)
2601      return Error(RegLoc, "register expected");
2602    // Allow Q regs and just interpret them as the two D sub-registers.
2603    bool isQReg = false;
2604    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2605      Reg = getDRegFromQReg(Reg);
2606      isQReg = true;
2607    }
2608    // The register must be in the same register class as the first.
2609    if (!RC->contains(Reg))
2610      return Error(RegLoc, "invalid register in register list");
2611    // List must be monotonically increasing.
2612    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
2613      return Error(RegLoc, "register list not in ascending order");
2614    // VFP register lists must also be contiguous.
2615    // It's OK to use the enumeration values directly here, as the
2616    // VFP register classes have the enum sorted properly.
2617    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2618        Reg != OldReg + 1)
2619      return Error(RegLoc, "non-contiguous register range");
2620    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2621    if (isQReg)
2622      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2623  }
2624
2625  SMLoc E = Parser.getTok().getLoc();
2626  if (Parser.getTok().isNot(AsmToken::RCurly))
2627    return Error(E, "'}' expected");
2628  Parser.Lex(); // Eat '}' token.
2629
2630  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2631  return false;
2632}
2633
2634// Helper function to parse the lane index for vector lists.
2635ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2636parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2637  Index = 0; // Always return a defined index value.
2638  if (Parser.getTok().is(AsmToken::LBrac)) {
2639    Parser.Lex(); // Eat the '['.
2640    if (Parser.getTok().is(AsmToken::RBrac)) {
2641      // "Dn[]" is the 'all lanes' syntax.
2642      LaneKind = AllLanes;
2643      Parser.Lex(); // Eat the ']'.
2644      return MatchOperand_Success;
2645    }
2646    if (Parser.getTok().is(AsmToken::Integer)) {
2647      int64_t Val = Parser.getTok().getIntVal();
2648      // FIXME: Make this range check context sensitive for .8, .16, .32.
2649      if (Val < 0 || Val > 7)
2650        Error(Parser.getTok().getLoc(), "lane index out of range");
2651      Index = Val;
2652      LaneKind = IndexedLane;
2653      Parser.Lex(); // Eat the token;
2654      if (Parser.getTok().isNot(AsmToken::RBrac))
2655        Error(Parser.getTok().getLoc(), "']' expected");
2656      Parser.Lex(); // Eat the ']'.
2657      return MatchOperand_Success;
2658    }
2659    Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2660    return MatchOperand_ParseFail;
2661  }
2662  LaneKind = NoLanes;
2663  return MatchOperand_Success;
2664}
2665
2666/// parseVectorList - Parse a vector register list.
2667ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2668parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2669  VectorLaneTy LaneKind;
2670  unsigned LaneIndex;
2671  SMLoc S = Parser.getTok().getLoc();
2672  // As an extension (to match gas), support a plain D register or Q register
2673  // (without enclosing curly braces) as a single- or double-entry list,
2674  // respectively.
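  // For example, a bare "d3" is treated as the one-register list {d3}, and a
  // bare "q1" as the two-register list {d2, d3}.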
2675  if (Parser.getTok().is(AsmToken::Identifier)) {
2676    int Reg = tryParseRegister();
2677    if (Reg == -1)
2678      return MatchOperand_NoMatch;
2679    SMLoc E = Parser.getTok().getLoc();
2680    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2681      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2682      if (Res != MatchOperand_Success)
2683        return Res;
2684      switch (LaneKind) {
2685      default:
2686        llvm_unreachable("unexpected lane kind!");
2687      case NoLanes:
2688        E = Parser.getTok().getLoc();
2689        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E));
2690        break;
2691      case AllLanes:
2692        E = Parser.getTok().getLoc();
2693        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
2694        break;
2695      case IndexedLane:
2696        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2697                                                               LaneIndex, S,E));
2698        break;
2699      }
2700      return MatchOperand_Success;
2701    }
2702    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2703      Reg = getDRegFromQReg(Reg);
2704      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2705      if (Res != MatchOperand_Success)
2706        return Res;
2707      switch (LaneKind) {
2708      default:
2709        llvm_unreachable("unexpected lane kind!");
2710      case NoLanes:
2711        E = Parser.getTok().getLoc();
2712        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, E));
2713        break;
2714      case AllLanes:
2715        E = Parser.getTok().getLoc();
2716        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
2717        break;
2718      case IndexedLane:
2719        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2720                                                               LaneIndex, S,E));
2721        break;
2722      }
2723      return MatchOperand_Success;
2724    }
2725    Error(S, "vector register expected");
2726    return MatchOperand_ParseFail;
2727  }
2728
2729  if (Parser.getTok().isNot(AsmToken::LCurly))
2730    return MatchOperand_NoMatch;
2731
2732  Parser.Lex(); // Eat '{' token.
2733  SMLoc RegLoc = Parser.getTok().getLoc();
2734
2735  int Reg = tryParseRegister();
2736  if (Reg == -1) {
2737    Error(RegLoc, "register expected");
2738    return MatchOperand_ParseFail;
2739  }
2740  unsigned Count = 1;
2741  unsigned FirstReg = Reg;
2742  // The list is of D registers, but we also allow Q regs and just interpret
2743  // them as the two D sub-registers.
2744  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2745    FirstReg = Reg = getDRegFromQReg(Reg);
2746    ++Reg;
2747    ++Count;
2748  }
2749  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2750    return MatchOperand_ParseFail;
2751
2752  while (Parser.getTok().is(AsmToken::Comma) ||
2753         Parser.getTok().is(AsmToken::Minus)) {
2754    if (Parser.getTok().is(AsmToken::Minus)) {
2755      Parser.Lex(); // Eat the minus.
2756      SMLoc EndLoc = Parser.getTok().getLoc();
2757      int EndReg = tryParseRegister();
2758      if (EndReg == -1) {
2759        Error(EndLoc, "register expected");
2760        return MatchOperand_ParseFail;
2761      }
2762      // Allow Q regs and just interpret them as the two D sub-registers.
2763      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2764        EndReg = getDRegFromQReg(EndReg) + 1;
2765      // If the register is the same as the start reg, there's nothing
2766      // more to do.
2767      if (Reg == EndReg)
2768        continue;
2769      // The register must be in the same register class as the first.
2770      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2771        Error(EndLoc, "invalid register in register list");
2772        return MatchOperand_ParseFail;
2773      }
2774      // Ranges must go from low to high.
2775      if (Reg > EndReg) {
2776        Error(EndLoc, "bad range in register list");
2777        return MatchOperand_ParseFail;
2778      }
2779      // Parse the lane specifier if present.
2780      VectorLaneTy NextLaneKind;
2781      unsigned NextLaneIndex;
2782      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2783        return MatchOperand_ParseFail;
2784      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2785        Error(EndLoc, "mismatched lane index in register list");
2786        return MatchOperand_ParseFail;
2787      }
2788      EndLoc = Parser.getTok().getLoc();
2789
2790      // Add all the registers in the range to the register list.
2791      Count += EndReg - Reg;
2792      Reg = EndReg;
2793      continue;
2794    }
2795    Parser.Lex(); // Eat the comma.
2796    RegLoc = Parser.getTok().getLoc();
2797    int OldReg = Reg;
2798    Reg = tryParseRegister();
2799    if (Reg == -1) {
2800      Error(RegLoc, "register expected");
2801      return MatchOperand_ParseFail;
2802    }
2803    // Vector register lists must be contiguous.
2804    // It's OK to use the enumeration values directly here, as the
2805    // VFP register classes have the enum sorted properly.
2806    //
2807    // The list is of D registers, but we also allow Q regs and just interpret
2808    // them as the two D sub-registers.
2809    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2810      Reg = getDRegFromQReg(Reg);
2811      if (Reg != OldReg + 1) {
2812        Error(RegLoc, "non-contiguous register range");
2813        return MatchOperand_ParseFail;
2814      }
2815      ++Reg;
2816      Count += 2;
2817      // Parse the lane specifier if present.
2818      VectorLaneTy NextLaneKind;
2819      unsigned NextLaneIndex;
2820      SMLoc EndLoc = Parser.getTok().getLoc();
2821      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2822        return MatchOperand_ParseFail;
2823      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2824        Error(EndLoc, "mismatched lane index in register list");
2825        return MatchOperand_ParseFail;
2826      }
2827      continue;
2828    }
2829    // Normal D register. Just check that it's contiguous and keep going.
2830    if (Reg != OldReg + 1) {
2831      Error(RegLoc, "non-contiguous register range");
2832      return MatchOperand_ParseFail;
2833    }
2834    ++Count;
2835    // Parse the lane specifier if present.
2836    VectorLaneTy NextLaneKind;
2837    unsigned NextLaneIndex;
2838    SMLoc EndLoc = Parser.getTok().getLoc();
2839    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2840      return MatchOperand_ParseFail;
2841    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2842      Error(EndLoc, "mismatched lane index in register list");
2843      return MatchOperand_ParseFail;
2844    }
2845  }
2846
2847  SMLoc E = Parser.getTok().getLoc();
2848  if (Parser.getTok().isNot(AsmToken::RCurly)) {
2849    Error(E, "'}' expected");
2850    return MatchOperand_ParseFail;
2851  }
2852  Parser.Lex(); // Eat '}' token.
2853
2854  switch (LaneKind) {
2855  default:
2856    llvm_unreachable("unexpected lane kind in register list.");
2857  case NoLanes:
2858    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
2859    break;
2860  case AllLanes:
2861    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
2862                                                            S, E));
2863    break;
2864  case IndexedLane:
2865    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
2866                                                           LaneIndex, S, E));
2867    break;
2868  }
2869  return MatchOperand_Success;
2870}
2871
2872/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
2873ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2874parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2875  SMLoc S = Parser.getTok().getLoc();
2876  const AsmToken &Tok = Parser.getTok();
2877  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2878  StringRef OptStr = Tok.getString();
2879
2880  unsigned Opt = StringSwitch<unsigned>(OptStr)
2881    .Case("sy",    ARM_MB::SY)
2882    .Case("st",    ARM_MB::ST)
2883    .Case("sh",    ARM_MB::ISH)
2884    .Case("ish",   ARM_MB::ISH)
2885    .Case("shst",  ARM_MB::ISHST)
2886    .Case("ishst", ARM_MB::ISHST)
2887    .Case("nsh",   ARM_MB::NSH)
2888    .Case("un",    ARM_MB::NSH)
2889    .Case("nshst", ARM_MB::NSHST)
2890    .Case("unst",  ARM_MB::NSHST)
2891    .Case("osh",   ARM_MB::OSH)
2892    .Case("oshst", ARM_MB::OSHST)
2893    .Default(~0U);
2894
2895  if (Opt == ~0U)
2896    return MatchOperand_NoMatch;
2897
2898  Parser.Lex(); // Eat identifier token.
2899  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
2900  return MatchOperand_Success;
2901}
2902
2903/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
2904ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2905parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2906  SMLoc S = Parser.getTok().getLoc();
2907  const AsmToken &Tok = Parser.getTok();
2908  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2909  StringRef IFlagsStr = Tok.getString();
2910
2911  // An iflags string of "none" is interpreted to mean that none of the AIF
2912  // bits are set.  Not a terribly useful instruction, but a valid encoding.
2913  unsigned IFlags = 0;
2914  if (IFlagsStr != "none") {
2915    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
2916      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
2917        .Case("a", ARM_PROC::A)
2918        .Case("i", ARM_PROC::I)
2919        .Case("f", ARM_PROC::F)
2920        .Default(~0U);
2921
2922      // If some specific iflag is already set, it means that some letter is
2923      // present more than once, which is not acceptable.
2924      if (Flag == ~0U || (IFlags & Flag))
2925        return MatchOperand_NoMatch;
2926
2927      IFlags |= Flag;
2928    }
2929  }
2930
2931  Parser.Lex(); // Eat identifier token.
2932  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
2933  return MatchOperand_Success;
2934}
2935
2936/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
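/// For example, the 'cpsr_fc' in 'msr cpsr_fc, r0', or 'primask' on M-class.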
2937ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2938parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2939  SMLoc S = Parser.getTok().getLoc();
2940  const AsmToken &Tok = Parser.getTok();
2941  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2942  StringRef Mask = Tok.getString();
2943
2944  if (isMClass()) {
2945    // See ARMv6-M 10.1.1
2946    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
2947      .Case("apsr", 0)
2948      .Case("iapsr", 1)
2949      .Case("eapsr", 2)
2950      .Case("xpsr", 3)
2951      .Case("ipsr", 5)
2952      .Case("epsr", 6)
2953      .Case("iepsr", 7)
2954      .Case("msp", 8)
2955      .Case("psp", 9)
2956      .Case("primask", 16)
2957      .Case("basepri", 17)
2958      .Case("basepri_max", 18)
2959      .Case("faultmask", 19)
2960      .Case("control", 20)
2961      .Default(~0U);
2962
2963    if (FlagsVal == ~0U)
2964      return MatchOperand_NoMatch;
2965
2966    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
2967      // basepri, basepri_max and faultmask only valid for V7m.
2968      return MatchOperand_NoMatch;
2969
2970    Parser.Lex(); // Eat identifier token.
2971    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
2972    return MatchOperand_Success;
2973  }
2974
2975  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
2976  size_t Start = 0, Next = Mask.find('_');
2977  StringRef Flags = "";
2978  std::string SpecReg = Mask.slice(Start, Next).lower();
2979  if (Next != StringRef::npos)
2980    Flags = Mask.slice(Next+1, Mask.size());
2981
2982  // FlagsVal contains the complete mask:
2983  // 3-0: Mask
2984  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
2985  unsigned FlagsVal = 0;
2986
2987  if (SpecReg == "apsr") {
2988    FlagsVal = StringSwitch<unsigned>(Flags)
2989    .Case("nzcvq",  0x8) // same as CPSR_f
2990    .Case("g",      0x4) // same as CPSR_s
2991    .Case("nzcvqg", 0xc) // same as CPSR_fs
2992    .Default(~0U);
2993
2994    if (FlagsVal == ~0U) {
2995      if (!Flags.empty())
2996        return MatchOperand_NoMatch;
2997      else
2998        FlagsVal = 8; // No flag
2999    }
3000  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
3001    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
3002      Flags = "fc";
3003    for (int i = 0, e = Flags.size(); i != e; ++i) {
3004      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
3005      .Case("c", 1)
3006      .Case("x", 2)
3007      .Case("s", 4)
3008      .Case("f", 8)
3009      .Default(~0U);
3010
3011      // If some specific flag is already set, it means that some letter is
3012      // present more than once, which is not acceptable.
3013      if (Flag == ~0U || (FlagsVal & Flag))
3014        return MatchOperand_NoMatch;
3015      FlagsVal |= Flag;
3016    }
3017  } else // No match for special register.
3018    return MatchOperand_NoMatch;
3019
3020  // Special register without flags is NOT equivalent to "fc" flags.
3021  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
3022  // two lines would enable gas compatibility at the expense of breaking
3023  // round-tripping.
3024  //
3025  // if (!FlagsVal)
3026  //  FlagsVal = 0x9;
3027
3028  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
3029  if (SpecReg == "spsr")
3030    FlagsVal |= 16;
3031
3032  Parser.Lex(); // Eat identifier token.
3033  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
3034  return MatchOperand_Success;
3035}
3036
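/// parsePKHImm - Parse the shift specifier operand for the PKH instructions,
/// e.g. the 'lsl #8' in 'pkhbt r0, r1, r2, lsl #8'.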
3037ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3038parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
3039            int Low, int High) {
3040  const AsmToken &Tok = Parser.getTok();
3041  if (Tok.isNot(AsmToken::Identifier)) {
3042    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3043    return MatchOperand_ParseFail;
3044  }
3045  StringRef ShiftName = Tok.getString();
3046  std::string LowerOp = Op.lower();
3047  std::string UpperOp = Op.upper();
3048  if (ShiftName != LowerOp && ShiftName != UpperOp) {
3049    Error(Parser.getTok().getLoc(), Op + " operand expected.");
3050    return MatchOperand_ParseFail;
3051  }
3052  Parser.Lex(); // Eat shift type token.
3053
3054  // There must be a '#' and a shift amount.
3055  if (Parser.getTok().isNot(AsmToken::Hash)) {
3056    Error(Parser.getTok().getLoc(), "'#' expected");
3057    return MatchOperand_ParseFail;
3058  }
3059  Parser.Lex(); // Eat hash token.
3060
3061  const MCExpr *ShiftAmount;
3062  SMLoc Loc = Parser.getTok().getLoc();
3063  if (getParser().ParseExpression(ShiftAmount)) {
3064    Error(Loc, "illegal expression");
3065    return MatchOperand_ParseFail;
3066  }
3067  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3068  if (!CE) {
3069    Error(Loc, "constant expression expected");
3070    return MatchOperand_ParseFail;
3071  }
3072  int Val = CE->getValue();
3073  if (Val < Low || Val > High) {
3074    Error(Loc, "immediate value out of range");
3075    return MatchOperand_ParseFail;
3076  }
3077
3078  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
3079
3080  return MatchOperand_Success;
3081}
3082
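/// parseSetEndImm - Parse the endianness operand for the SETEND instruction,
/// i.e. 'be' or 'le', e.g. the 'be' in 'setend be'.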
3083ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3084parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3085  const AsmToken &Tok = Parser.getTok();
3086  SMLoc S = Tok.getLoc();
3087  if (Tok.isNot(AsmToken::Identifier)) {
3088    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3089    return MatchOperand_ParseFail;
3090  }
3091  int Val = StringSwitch<int>(Tok.getString())
3092    .Case("be", 1)
3093    .Case("le", 0)
3094    .Default(-1);
3095  Parser.Lex(); // Eat the token.
3096
3097  if (Val == -1) {
3098    Error(Tok.getLoc(), "'be' or 'le' operand expected");
3099    return MatchOperand_ParseFail;
3100  }
3101  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
3102                                                                  getContext()),
3103                                           S, Parser.getTok().getLoc()));
3104  return MatchOperand_Success;
3105}
3106
3107/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3108/// instructions. Legal values are:
3109///     lsl #n  'n' in [0,31]
3110///     asr #n  'n' in [1,32]
3111///             n == 32 encoded as n == 0.
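/// For example, the 'asr #16' in 'ssat r0, #8, r1, asr #16'.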
3112ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3113parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3114  const AsmToken &Tok = Parser.getTok();
3115  SMLoc S = Tok.getLoc();
3116  if (Tok.isNot(AsmToken::Identifier)) {
3117    Error(S, "shift operator 'asr' or 'lsl' expected");
3118    return MatchOperand_ParseFail;
3119  }
3120  StringRef ShiftName = Tok.getString();
3121  bool isASR;
3122  if (ShiftName == "lsl" || ShiftName == "LSL")
3123    isASR = false;
3124  else if (ShiftName == "asr" || ShiftName == "ASR")
3125    isASR = true;
3126  else {
3127    Error(S, "shift operator 'asr' or 'lsl' expected");
3128    return MatchOperand_ParseFail;
3129  }
3130  Parser.Lex(); // Eat the operator.
3131
3132  // A '#' and a shift amount.
3133  if (Parser.getTok().isNot(AsmToken::Hash)) {
3134    Error(Parser.getTok().getLoc(), "'#' expected");
3135    return MatchOperand_ParseFail;
3136  }
3137  Parser.Lex(); // Eat hash token.
3138
3139  const MCExpr *ShiftAmount;
3140  SMLoc E = Parser.getTok().getLoc();
3141  if (getParser().ParseExpression(ShiftAmount)) {
3142    Error(E, "malformed shift expression");
3143    return MatchOperand_ParseFail;
3144  }
3145  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3146  if (!CE) {
3147    Error(E, "shift amount must be an immediate");
3148    return MatchOperand_ParseFail;
3149  }
3150
3151  int64_t Val = CE->getValue();
3152  if (isASR) {
3153    // Shift amount must be in [1,32]
3154    if (Val < 1 || Val > 32) {
3155      Error(E, "'asr' shift amount must be in range [1,32]");
3156      return MatchOperand_ParseFail;
3157    }
3158    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3159    if (isThumb() && Val == 32) {
3160      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3161      return MatchOperand_ParseFail;
3162    }
3163    if (Val == 32) Val = 0;
3164  } else {
3165    // Shift amount must be in [0,31]
3166    if (Val < 0 || Val > 31) {
3167      Error(E, "'lsl' shift amount must be in range [0,31]");
3168      return MatchOperand_ParseFail;
3169    }
3170  }
3171
3172  E = Parser.getTok().getLoc();
3173  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3174
3175  return MatchOperand_Success;
3176}
3177
3178/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3179/// of instructions. Legal values are:
3180///     ror #n  'n' in {0, 8, 16, 24}
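/// For example, the 'ror #8' in 'sxtb r0, r1, ror #8'.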
3181ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3182parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3183  const AsmToken &Tok = Parser.getTok();
3184  SMLoc S = Tok.getLoc();
3185  if (Tok.isNot(AsmToken::Identifier))
3186    return MatchOperand_NoMatch;
3187  StringRef ShiftName = Tok.getString();
3188  if (ShiftName != "ror" && ShiftName != "ROR")
3189    return MatchOperand_NoMatch;
3190  Parser.Lex(); // Eat the operator.
3191
3192  // A '#' and a rotate amount.
3193  if (Parser.getTok().isNot(AsmToken::Hash)) {
3194    Error(Parser.getTok().getLoc(), "'#' expected");
3195    return MatchOperand_ParseFail;
3196  }
3197  Parser.Lex(); // Eat hash token.
3198
3199  const MCExpr *ShiftAmount;
3200  SMLoc E = Parser.getTok().getLoc();
3201  if (getParser().ParseExpression(ShiftAmount)) {
3202    Error(E, "malformed rotate expression");
3203    return MatchOperand_ParseFail;
3204  }
3205  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3206  if (!CE) {
3207    Error(E, "rotate amount must be an immediate");
3208    return MatchOperand_ParseFail;
3209  }
3210
3211  int64_t Val = CE->getValue();
3212  // Rotate amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3213  // normally, zero is represented in asm by omitting the rotate operand
3214  // entirely.
3215  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3216    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3217    return MatchOperand_ParseFail;
3218  }
3219
3220  E = Parser.getTok().getLoc();
3221  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3222
3223  return MatchOperand_Success;
3224}
3225
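/// parseBitfield - Parse the <lsb>, <width> operand pair of the bitfield
/// instructions, e.g. the '#8, #4' in 'bfi r0, r1, #8, #4'.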
3226ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3227parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3228  SMLoc S = Parser.getTok().getLoc();
3229  // The bitfield descriptor is really two operands, the LSB and the width.
3230  if (Parser.getTok().isNot(AsmToken::Hash)) {
3231    Error(Parser.getTok().getLoc(), "'#' expected");
3232    return MatchOperand_ParseFail;
3233  }
3234  Parser.Lex(); // Eat hash token.
3235
3236  const MCExpr *LSBExpr;
3237  SMLoc E = Parser.getTok().getLoc();
3238  if (getParser().ParseExpression(LSBExpr)) {
3239    Error(E, "malformed immediate expression");
3240    return MatchOperand_ParseFail;
3241  }
3242  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3243  if (!CE) {
3244    Error(E, "'lsb' operand must be an immediate");
3245    return MatchOperand_ParseFail;
3246  }
3247
3248  int64_t LSB = CE->getValue();
3249  // The LSB must be in the range [0,31]
3250  if (LSB < 0 || LSB > 31) {
3251    Error(E, "'lsb' operand must be in the range [0,31]");
3252    return MatchOperand_ParseFail;
3253  }
3254  E = Parser.getTok().getLoc();
3255
3256  // Expect another immediate operand.
3257  if (Parser.getTok().isNot(AsmToken::Comma)) {
3258    Error(Parser.getTok().getLoc(), "too few operands");
3259    return MatchOperand_ParseFail;
3260  }
3261  Parser.Lex(); // Eat comma token.
3262  if (Parser.getTok().isNot(AsmToken::Hash)) {
3263    Error(Parser.getTok().getLoc(), "'#' expected");
3264    return MatchOperand_ParseFail;
3265  }
3266  Parser.Lex(); // Eat hash token.
3267
3268  const MCExpr *WidthExpr;
3269  if (getParser().ParseExpression(WidthExpr)) {
3270    Error(E, "malformed immediate expression");
3271    return MatchOperand_ParseFail;
3272  }
3273  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3274  if (!CE) {
3275    Error(E, "'width' operand must be an immediate");
3276    return MatchOperand_ParseFail;
3277  }
3278
3279  int64_t Width = CE->getValue();
3280  // The width must be in the range [1,32-lsb]
3281  if (Width < 1 || Width > 32 - LSB) {
3282    Error(E, "'width' operand must be in the range [1,32-lsb]");
3283    return MatchOperand_ParseFail;
3284  }
3285  E = Parser.getTok().getLoc();
3286
3287  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3288
3289  return MatchOperand_Success;
3290}
3291
3292ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3293parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3294  // Check for a post-index addressing register operand. Specifically:
3295  // postidx_reg := '+' register {, shift}
3296  //              | '-' register {, shift}
3297  //              | register {, shift}
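  // For example, the 'r2, lsl #2' in 'ldr r0, [r1], r2, lsl #2'.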
3298
3299  // This method must return MatchOperand_NoMatch without consuming any tokens
3300  // in the case where there is no match, as other alternatives take other
3301  // parse methods.
3302  AsmToken Tok = Parser.getTok();
3303  SMLoc S = Tok.getLoc();
3304  bool haveEaten = false;
3305  bool isAdd = true;
3306  int Reg = -1;
3307  if (Tok.is(AsmToken::Plus)) {
3308    Parser.Lex(); // Eat the '+' token.
3309    haveEaten = true;
3310  } else if (Tok.is(AsmToken::Minus)) {
3311    Parser.Lex(); // Eat the '-' token.
3312    isAdd = false;
3313    haveEaten = true;
3314  }
3315  if (Parser.getTok().is(AsmToken::Identifier))
3316    Reg = tryParseRegister();
3317  if (Reg == -1) {
3318    if (!haveEaten)
3319      return MatchOperand_NoMatch;
3320    Error(Parser.getTok().getLoc(), "register expected");
3321    return MatchOperand_ParseFail;
3322  }
3323  SMLoc E = Parser.getTok().getLoc();
3324
3325  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3326  unsigned ShiftImm = 0;
3327  if (Parser.getTok().is(AsmToken::Comma)) {
3328    Parser.Lex(); // Eat the ','.
3329    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3330      return MatchOperand_ParseFail;
3331  }
3332
3333  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3334                                                  ShiftImm, S, E));
3335
3336  return MatchOperand_Success;
3337}
3338
3339ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3340parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3341  // Check for a post-index addressing register operand. Specifically:
3342  // am3offset := '+' register
3343  //              | '-' register
3344  //              | register
3345  //              | # imm
3346  //              | # + imm
3347  //              | # - imm
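  // For example, the '#4' in 'ldrh r0, [r1], #4'.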
3348
3349  // This method must return MatchOperand_NoMatch without consuming any tokens
3350  // in the case where there is no match, as other alternatives take other
3351  // parse methods.
3352  AsmToken Tok = Parser.getTok();
3353  SMLoc S = Tok.getLoc();
3354
3355  // Do immediates first, as we always parse those if we have a '#'.
3356  if (Parser.getTok().is(AsmToken::Hash)) {
3357    Parser.Lex(); // Eat the '#'.
3358    // Explicitly look for a '-', as we need to encode negative zero
3359    // differently.
3360    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3361    const MCExpr *Offset;
3362    if (getParser().ParseExpression(Offset))
3363      return MatchOperand_ParseFail;
3364    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3365    if (!CE) {
3366      Error(S, "constant expression expected");
3367      return MatchOperand_ParseFail;
3368    }
3369    SMLoc E = Tok.getLoc();
3370    // Negative zero is encoded as the flag value INT32_MIN.
3371    int32_t Val = CE->getValue();
3372    if (isNegative && Val == 0)
3373      Val = INT32_MIN;
3374
3375    Operands.push_back(
3376      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3377
3378    return MatchOperand_Success;
3379  }
3380
3381
3382  bool haveEaten = false;
3383  bool isAdd = true;
3384  int Reg = -1;
3385  if (Tok.is(AsmToken::Plus)) {
3386    Parser.Lex(); // Eat the '+' token.
3387    haveEaten = true;
3388  } else if (Tok.is(AsmToken::Minus)) {
3389    Parser.Lex(); // Eat the '-' token.
3390    isAdd = false;
3391    haveEaten = true;
3392  }
3393  if (Parser.getTok().is(AsmToken::Identifier))
3394    Reg = tryParseRegister();
3395  if (Reg == -1) {
3396    if (!haveEaten)
3397      return MatchOperand_NoMatch;
3398    Error(Parser.getTok().getLoc(), "register expected");
3399    return MatchOperand_ParseFail;
3400  }
3401  SMLoc E = Parser.getTok().getLoc();
3402
3403  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3404                                                  0, S, E));
3405
3406  return MatchOperand_Success;
3407}
3408
3409/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3410/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3411/// when they refer to multiple MIOperands inside a single one.
3412bool ARMAsmParser::
3413cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3414             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3415  // Rt, Rt2
3416  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3417  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3418  // Create a writeback register dummy placeholder.
3419  Inst.addOperand(MCOperand::CreateReg(0));
3420  // addr
3421  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3422  // pred
3423  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3424  return true;
3425}
3426
3427/// cvtT2StrdPre - Convert parsed operands to MCInst.
3428/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3429/// when they refer to multiple MIOperands inside a single one.
3430bool ARMAsmParser::
3431cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3432             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3433  // Create a writeback register dummy placeholder.
3434  Inst.addOperand(MCOperand::CreateReg(0));
3435  // Rt, Rt2
3436  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3437  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3438  // addr
3439  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3440  // pred
3441  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3442  return true;
3443}
3444
3445/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3446/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3447/// when they refer to multiple MIOperands inside a single one.
3448bool ARMAsmParser::
3449cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3450                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3451  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3452
3453  // Create a writeback register dummy placeholder.
3454  Inst.addOperand(MCOperand::CreateImm(0));
3455
3456  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3457  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3458  return true;
3459}
3460
3461/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3462/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3463/// when they refer to multiple MIOperands inside a single one.
3464bool ARMAsmParser::
3465cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3466                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3467  // Create a writeback register dummy placeholder.
3468  Inst.addOperand(MCOperand::CreateImm(0));
3469  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3470  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3471  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3472  return true;
3473}
3474
3475/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3476/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3477/// when they refer to multiple MIOperands inside a single one.
3478bool ARMAsmParser::
3479cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3480                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3481  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3482
3483  // Create a writeback register dummy placeholder.
3484  Inst.addOperand(MCOperand::CreateImm(0));
3485
3486  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3487  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3488  return true;
3489}
3490
3491/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3492/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3493/// when they refer to multiple MIOperands inside a single one.
3494bool ARMAsmParser::
3495cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3496                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3497  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3498
3499  // Create a writeback register dummy placeholder.
3500  Inst.addOperand(MCOperand::CreateImm(0));
3501
3502  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3503  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3504  return true;
3505}
3506
3507
3508/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3509/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3510/// when they refer to multiple MIOperands inside a single one.
3511bool ARMAsmParser::
3512cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3513                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3514  // Create a writeback register dummy placeholder.
3515  Inst.addOperand(MCOperand::CreateImm(0));
3516  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3517  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3518  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3519  return true;
3520}
3521
3522/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3523/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3524/// when they refer to multiple MIOperands inside a single one.
3525bool ARMAsmParser::
3526cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3527                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3528  // Create a writeback register dummy placeholder.
3529  Inst.addOperand(MCOperand::CreateImm(0));
3530  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3531  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3532  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3533  return true;
3534}
3535
3536/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3537/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3538/// when they refer to multiple MIOperands inside a single one.
3539bool ARMAsmParser::
3540cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3541                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3542  // Create a writeback register dummy placeholder.
3543  Inst.addOperand(MCOperand::CreateImm(0));
3544  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3545  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3546  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3547  return true;
3548}
3549
3550/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3551/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3552/// when they refer to multiple MIOperands inside a single one.
3553bool ARMAsmParser::
3554cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3555                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3556  // Rt
3557  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3558  // Create a writeback register dummy placeholder.
3559  Inst.addOperand(MCOperand::CreateImm(0));
3560  // addr
3561  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3562  // offset
3563  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3564  // pred
3565  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3566  return true;
3567}
3568
3569/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3570/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3571/// when they refer to multiple MIOperands inside a single one.
3572bool ARMAsmParser::
3573cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3574                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3575  // Rt
3576  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3577  // Create a writeback register dummy placeholder.
3578  Inst.addOperand(MCOperand::CreateImm(0));
3579  // addr
3580  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3581  // offset
3582  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3583  // pred
3584  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3585  return true;
3586}
3587
3588/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3589/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3590/// when they refer to multiple MIOperands inside a single one.
3591bool ARMAsmParser::
3592cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3593                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3594  // Create a writeback register dummy placeholder.
3595  Inst.addOperand(MCOperand::CreateImm(0));
3596  // Rt
3597  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3598  // addr
3599  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3600  // offset
3601  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3602  // pred
3603  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3604  return true;
3605}
3606
3607/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3608/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3609/// when they refer to multiple MIOperands inside a single one.
3610bool ARMAsmParser::
3611cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3612                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3613  // Create a writeback register dummy placeholder.
3614  Inst.addOperand(MCOperand::CreateImm(0));
3615  // Rt
3616  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3617  // addr
3618  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3619  // offset
3620  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3621  // pred
3622  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3623  return true;
3624}
3625
3626/// cvtLdrdPre - Convert parsed operands to MCInst.
3627/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3628/// when they refer to multiple MIOperands inside a single one.
3629bool ARMAsmParser::
3630cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3631           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3632  // Rt, Rt2
3633  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3634  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3635  // Create a writeback register dummy placeholder.
3636  Inst.addOperand(MCOperand::CreateImm(0));
3637  // addr
3638  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3639  // pred
3640  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3641  return true;
3642}
3643
3644/// cvtStrdPre - Convert parsed operands to MCInst.
3645/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3646/// when they refer to multiple MIOperands inside a single one.
3647bool ARMAsmParser::
3648cvtStrdPre(MCInst &Inst, unsigned Opcode,
3649           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3650  // Create a writeback register dummy placeholder.
3651  Inst.addOperand(MCOperand::CreateImm(0));
3652  // Rt, Rt2
3653  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3654  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3655  // addr
3656  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3657  // pred
3658  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3659  return true;
3660}
3661
3662/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3663/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3664/// when they refer to multiple MIOperands inside a single one.
3665bool ARMAsmParser::
3666cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3667                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3668  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3669  // Create a writeback register dummy placeholder.
3670  Inst.addOperand(MCOperand::CreateImm(0));
3671  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3672  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3673  return true;
3674}
3675
3676/// cvtThumbMultiply - Convert parsed operands to MCInst.
3677/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3678/// when they refer to multiple MIOperands inside a single one.
3679bool ARMAsmParser::
3680cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3681           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3682  // The second source operand must be the same register as the destination
3683  // operand.
3684  if (Operands.size() == 6 &&
3685      (((ARMOperand*)Operands[3])->getReg() !=
3686       ((ARMOperand*)Operands[5])->getReg()) &&
3687      (((ARMOperand*)Operands[3])->getReg() !=
3688       ((ARMOperand*)Operands[4])->getReg())) {
3689    Error(Operands[3]->getStartLoc(),
3690          "destination register must match source register");
3691    return false;
3692  }
3693  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3694  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3695  // If we have a three-operand form, make sure to set Rn to be the operand
3696  // that isn't the same as Rd.
3697  unsigned RegOp = 4;
3698  if (Operands.size() == 6 &&
3699      ((ARMOperand*)Operands[4])->getReg() ==
3700        ((ARMOperand*)Operands[3])->getReg())
3701    RegOp = 5;
3702  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3703  Inst.addOperand(Inst.getOperand(0));
3704  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3705
3706  return true;
3707}
3708
3709bool ARMAsmParser::
3710cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3711              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3712  // Vd
3713  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3714  // Create a writeback register dummy placeholder.
3715  Inst.addOperand(MCOperand::CreateImm(0));
3716  // Vn
3717  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3718  // pred
3719  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3720  return true;
3721}
3722
3723bool ARMAsmParser::
3724cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3725                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3726  // Vd
3727  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3728  // Create a writeback register dummy placeholder.
3729  Inst.addOperand(MCOperand::CreateImm(0));
3730  // Vn
3731  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3732  // Vm
3733  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3734  // pred
3735  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3736  return true;
3737}
3738
3739bool ARMAsmParser::
3740cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3741              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3742  // Create a writeback register dummy placeholder.
3743  Inst.addOperand(MCOperand::CreateImm(0));
3744  // Vn
3745  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3746  // Vt
3747  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3748  // pred
3749  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3750  return true;
3751}
3752
3753bool ARMAsmParser::
3754cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3755                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3756  // Create a writeback register dummy placeholder.
3757  Inst.addOperand(MCOperand::CreateImm(0));
3758  // Vn
3759  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3760  // Vm
3761  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3762  // Vt
3763  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3764  // pred
3765  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3766  return true;
3767}
3768
3769/// Parse an ARM memory expression. Return false on success, true (with an
3770/// error reported) on failure. The first token must be a '[' when called.
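/// Handles forms such as '[r0]', '[r0, #4]', '[r0, r1, lsl #2]' and an
/// alignment specifier such as '[r0, :128]'.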
3771bool ARMAsmParser::
3772parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3773  SMLoc S, E;
3774  assert(Parser.getTok().is(AsmToken::LBrac) &&
3775         "Token is not a Left Bracket");
3776  S = Parser.getTok().getLoc();
3777  Parser.Lex(); // Eat left bracket token.
3778
3779  const AsmToken &BaseRegTok = Parser.getTok();
3780  int BaseRegNum = tryParseRegister();
3781  if (BaseRegNum == -1)
3782    return Error(BaseRegTok.getLoc(), "register expected");
3783
3784  // The next token must either be a comma or a closing bracket.
3785  const AsmToken &Tok = Parser.getTok();
3786  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3787    return Error(Tok.getLoc(), "malformed memory operand");
3788
3789  if (Tok.is(AsmToken::RBrac)) {
3790    E = Tok.getLoc();
3791    Parser.Lex(); // Eat right bracket token.
3792
3793    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3794                                             0, 0, false, S, E));
3795
3796    // If there's a pre-indexing writeback marker, '!', just add it as a token
3797    // operand. It's rather odd, but syntactically valid.
3798    if (Parser.getTok().is(AsmToken::Exclaim)) {
3799      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3800      Parser.Lex(); // Eat the '!'.
3801    }
3802
3803    return false;
3804  }
3805
3806  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
3807  Parser.Lex(); // Eat the comma.
3808
3809  // If we have a ':', it's an alignment specifier.
3810  if (Parser.getTok().is(AsmToken::Colon)) {
3811    Parser.Lex(); // Eat the ':'.
3812    E = Parser.getTok().getLoc();
3813
3814    const MCExpr *Expr;
3815    if (getParser().ParseExpression(Expr))
3816     return true;
3817
3818    // The expression has to be a constant. Memory references with relocations
3819    // don't come through here, as they use the <label> forms of the relevant
3820    // instructions.
3821    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3822    if (!CE)
3823      return Error (E, "constant expression expected");
3824
3825    unsigned Align = 0;
3826    switch (CE->getValue()) {
3827    default:
3828      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
3829    case 64:  Align = 8; break;
3830    case 128: Align = 16; break;
3831    case 256: Align = 32; break;
3832    }
3833
3834    // Now we should have the closing ']'
3835    E = Parser.getTok().getLoc();
3836    if (Parser.getTok().isNot(AsmToken::RBrac))
3837      return Error(E, "']' expected");
3838    Parser.Lex(); // Eat right bracket token.
3839
3840    // Don't worry about range checking the value here. That's handled by
3841    // the is*() predicates.
3842    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
3843                                             ARM_AM::no_shift, 0, Align,
3844                                             false, S, E));
3845
3846    // If there's a pre-indexing writeback marker, '!', just add it as a token
3847    // operand.
3848    if (Parser.getTok().is(AsmToken::Exclaim)) {
3849      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3850      Parser.Lex(); // Eat the '!'.
3851    }
3852
3853    return false;
3854  }
3855
3856  // If we have a '#', it's an immediate offset, else assume it's a register
3857  // offset. Be friendly and also accept a plain integer (without a leading
3858  // hash) for gas compatibility.
3859  if (Parser.getTok().is(AsmToken::Hash) ||
3860      Parser.getTok().is(AsmToken::Integer)) {
3861    if (Parser.getTok().is(AsmToken::Hash))
3862      Parser.Lex(); // Eat the '#'.
3863    E = Parser.getTok().getLoc();
3864
3865    bool isNegative = getParser().getTok().is(AsmToken::Minus);
3866    const MCExpr *Offset;
3867    if (getParser().ParseExpression(Offset))
3868     return true;
3869
3870    // The expression has to be a constant. Memory references with relocations
3871    // don't come through here, as they use the <label> forms of the relevant
3872    // instructions.
3873    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3874    if (!CE)
3875      return Error (E, "constant expression expected");
3876
3877    // If the constant was #-0, represent it as INT32_MIN.
3878    int32_t Val = CE->getValue();
3879    if (isNegative && Val == 0)
3880      CE = MCConstantExpr::Create(INT32_MIN, getContext());
3881
3882    // Now we should have the closing ']'
3883    E = Parser.getTok().getLoc();
3884    if (Parser.getTok().isNot(AsmToken::RBrac))
3885      return Error(E, "']' expected");
3886    Parser.Lex(); // Eat right bracket token.
3887
3888    // Don't worry about range checking the value here. That's handled by
3889    // the is*() predicates.
3890    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
3891                                             ARM_AM::no_shift, 0, 0,
3892                                             false, S, E));
3893
3894    // If there's a pre-indexing writeback marker, '!', just add it as a token
3895    // operand.
3896    if (Parser.getTok().is(AsmToken::Exclaim)) {
3897      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3898      Parser.Lex(); // Eat the '!'.
3899    }
3900
3901    return false;
3902  }
3903
3904  // The register offset is optionally preceded by a '+' or '-'
3905  bool isNegative = false;
3906  if (Parser.getTok().is(AsmToken::Minus)) {
3907    isNegative = true;
3908    Parser.Lex(); // Eat the '-'.
3909  } else if (Parser.getTok().is(AsmToken::Plus)) {
3910    // Nothing to do.
3911    Parser.Lex(); // Eat the '+'.
3912  }
3913
3914  E = Parser.getTok().getLoc();
3915  int OffsetRegNum = tryParseRegister();
3916  if (OffsetRegNum == -1)
3917    return Error(E, "register expected");
3918
3919  // If there's a shift operator, handle it.
3920  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
3921  unsigned ShiftImm = 0;
3922  if (Parser.getTok().is(AsmToken::Comma)) {
3923    Parser.Lex(); // Eat the ','.
3924    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
3925      return true;
3926  }
3927
3928  // Now we should have the closing ']'
3929  E = Parser.getTok().getLoc();
3930  if (Parser.getTok().isNot(AsmToken::RBrac))
3931    return Error(E, "']' expected");
3932  Parser.Lex(); // Eat right bracket token.
3933
3934  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
3935                                           ShiftType, ShiftImm, 0, isNegative,
3936                                           S, E));
3937
3938  // If there's a pre-indexing writeback marker, '!', just add it as a token
3939  // operand.
3940  if (Parser.getTok().is(AsmToken::Exclaim)) {
3941    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3942    Parser.Lex(); // Eat the '!'.
3943  }
3944
3945  return false;
3946}
3947
3948/// parseMemRegOffsetShift - one of these two:
3949///   ( lsl | lsr | asr | ror ) , # shift_amount
3950///   rrx
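/// For example, the 'lsl #2' in 'ldr r0, [r1, r2, lsl #2]'.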
3951/// Return false if it successfully parses a shift, true otherwise.
3952bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
3953                                          unsigned &Amount) {
3954  SMLoc Loc = Parser.getTok().getLoc();
3955  const AsmToken &Tok = Parser.getTok();
3956  if (Tok.isNot(AsmToken::Identifier))
3957    return true;
3958  StringRef ShiftName = Tok.getString();
3959  if (ShiftName == "lsl" || ShiftName == "LSL" ||
3960      ShiftName == "asl" || ShiftName == "ASL")
3961    St = ARM_AM::lsl;
3962  else if (ShiftName == "lsr" || ShiftName == "LSR")
3963    St = ARM_AM::lsr;
3964  else if (ShiftName == "asr" || ShiftName == "ASR")
3965    St = ARM_AM::asr;
3966  else if (ShiftName == "ror" || ShiftName == "ROR")
3967    St = ARM_AM::ror;
3968  else if (ShiftName == "rrx" || ShiftName == "RRX")
3969    St = ARM_AM::rrx;
3970  else
3971    return Error(Loc, "illegal shift operator");
3972  Parser.Lex(); // Eat shift type token.
3973
3974  // rrx stands alone.
3975  Amount = 0;
3976  if (St != ARM_AM::rrx) {
3977    Loc = Parser.getTok().getLoc();
3978    // A '#' and a shift amount.
3979    const AsmToken &HashTok = Parser.getTok();
3980    if (HashTok.isNot(AsmToken::Hash))
3981      return Error(HashTok.getLoc(), "'#' expected");
3982    Parser.Lex(); // Eat hash token.
3983
3984    const MCExpr *Expr;
3985    if (getParser().ParseExpression(Expr))
3986      return true;
3987    // Range check the immediate.
3988    // lsl, ror: 0 <= imm <= 31
3989    // lsr, asr: 0 <= imm <= 32
3990    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3991    if (!CE)
3992      return Error(Loc, "shift amount must be an immediate");
3993    int64_t Imm = CE->getValue();
3994    if (Imm < 0 ||
3995        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
3996        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
3997      return Error(Loc, "immediate shift value out of range");
3998    Amount = Imm;
3999  }
4000
4001  return false;
4002}
4003
4004/// parseFPImm - Try to parse a floating point immediate operand.
4005ARMAsmParser::OperandMatchResultTy ARMAsmParser::
4006parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4007  SMLoc S = Parser.getTok().getLoc();
4008
4009  if (Parser.getTok().isNot(AsmToken::Hash))
4010    return MatchOperand_NoMatch;
4011
4012  // Disambiguate the VMOV forms that can accept an FP immediate.
4013  // vmov.f32 <sreg>, #imm
4014  // vmov.f64 <dreg>, #imm
4015  // vmov.f32 <dreg>, #imm  @ vector f32x2
4016  // vmov.f32 <qreg>, #imm  @ vector f32x4
4017  //
4018  // There are also the NEON VMOV instructions which expect an
4019  // integer constant. Make sure we don't try to parse an FPImm
4020  // for these:
4021  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
4022  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
4023  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
4024                           TyOp->getToken() != ".f64"))
4025    return MatchOperand_NoMatch;
4026
4027  Parser.Lex(); // Eat the '#'.
4028
4029  // Handle negation, as that still comes through as a separate token.
4030  bool isNegative = false;
4031  if (Parser.getTok().is(AsmToken::Minus)) {
4032    isNegative = true;
4033    Parser.Lex();
4034  }
4035  const AsmToken &Tok = Parser.getTok();
4036  if (Tok.is(AsmToken::Real)) {
4037    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
4038    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
4039    // If we had a '-' in front, toggle the sign bit.
4040    IntVal ^= (uint64_t)isNegative << 63;
4041    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
4042    Parser.Lex(); // Eat the token.
4043    if (Val == -1) {
4044      TokError("floating point value out of range");
4045      return MatchOperand_ParseFail;
4046    }
4047    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4048    return MatchOperand_Success;
4049  }
4050  if (Tok.is(AsmToken::Integer)) {
4051    int64_t Val = Tok.getIntVal();
4052    Parser.Lex(); // Eat the token.
4053    if (Val > 255 || Val < 0) {
4054      TokError("encoded floating point value out of range");
4055      return MatchOperand_ParseFail;
4056    }
4057    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
4058    return MatchOperand_Success;
4059  }
4060
4061  TokError("invalid floating point immediate");
4062  return MatchOperand_ParseFail;
4063}
4064/// Parse an ARM instruction operand.  For now this parses the operand regardless
4065/// of the mnemonic.
4066bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
4067                                StringRef Mnemonic) {
4068  SMLoc S, E;
4069
4070  // Check if the current operand has a custom associated parser. If so, try to
4071  // custom parse the operand; otherwise fall back to the general approach.
4072  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
4073  if (ResTy == MatchOperand_Success)
4074    return false;
4075  // If there wasn't a custom match, try the generic matcher below. Otherwise,
4076  // there was a match, but an error occurred, in which case, just return that
4077  // the operand parsing failed.
4078  if (ResTy == MatchOperand_ParseFail)
4079    return true;
4080
4081  switch (getLexer().getKind()) {
4082  default:
4083    Error(Parser.getTok().getLoc(), "unexpected token in operand");
4084    return true;
4085  case AsmToken::Identifier: {
4086    // If this is VMRS, check for the apsr_nzcv operand.
4087    if (!tryParseRegisterWithWriteBack(Operands))
4088      return false;
4089    int Res = tryParseShiftRegister(Operands);
4090    if (Res == 0) // success
4091      return false;
4092    else if (Res == -1) // irrecoverable error
4093      return true;
4094    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
4095      S = Parser.getTok().getLoc();
4096      Parser.Lex();
4097      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
4098      return false;
4099    }
4100
4101    // Fall though for the Identifier case that is not a register or a
4102    // special name.
4103  }
4104  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4105  case AsmToken::Integer: // things like 1f and 2b as branch targets
4106  case AsmToken::String:  // quoted label names.
4107  case AsmToken::Dot: {   // . as a branch target
4108    // This was not a register so parse other operands that start with an
4109    // identifier (like labels) as expressions and create them as immediates.
4110    const MCExpr *IdVal;
4111    S = Parser.getTok().getLoc();
4112    if (getParser().ParseExpression(IdVal))
4113      return true;
4114    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4115    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4116    return false;
4117  }
4118  case AsmToken::LBrac:
4119    return parseMemory(Operands);
4120  case AsmToken::LCurly:
4121    return parseRegisterList(Operands);
4122  case AsmToken::Hash: {
4123    // #42 -> immediate.
4124    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4125    S = Parser.getTok().getLoc();
4126    Parser.Lex();
4127    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4128    const MCExpr *ImmVal;
4129    if (getParser().ParseExpression(ImmVal))
4130      return true;
4131    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4132    if (CE) {
4133      int32_t Val = CE->getValue();
4134      if (isNegative && Val == 0)
4135        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4136    }
4137    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4138    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4139    return false;
4140  }
4141  case AsmToken::Colon: {
4142    // ":lower16:" and ":upper16:" expression prefixes
4143    // FIXME: Check it's an expression prefix,
4144    // e.g. (FOO - :lower16:BAR) isn't legal.
4145    ARMMCExpr::VariantKind RefKind;
4146    if (parsePrefix(RefKind))
4147      return true;
4148
4149    const MCExpr *SubExprVal;
4150    if (getParser().ParseExpression(SubExprVal))
4151      return true;
4152
4153    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4154                                                   getContext());
4155    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4156    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4157    return false;
4158  }
4159  }
4160}
4161
4162// parsePrefix - Parse the ARM 16-bit relocation expression prefixes, i.e.
4163//  :lower16: and :upper16:.
4164bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4165  RefKind = ARMMCExpr::VK_ARM_None;
4166
4167  // :lower16: and :upper16: modifiers
4168  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4169  Parser.Lex(); // Eat ':'
4170
4171  if (getLexer().isNot(AsmToken::Identifier)) {
4172    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4173    return true;
4174  }
4175
4176  StringRef IDVal = Parser.getTok().getIdentifier();
4177  if (IDVal == "lower16") {
4178    RefKind = ARMMCExpr::VK_ARM_LO16;
4179  } else if (IDVal == "upper16") {
4180    RefKind = ARMMCExpr::VK_ARM_HI16;
4181  } else {
4182    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4183    return true;
4184  }
4185  Parser.Lex();
4186
4187  if (getLexer().isNot(AsmToken::Colon)) {
4188    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4189    return true;
4190  }
4191  Parser.Lex(); // Eat the last ':'
4192  return false;
4193}
4194
4195/// \brief Given a mnemonic, split out possible predication code and carry
4196/// setting letters to form a canonical mnemonic and flags.
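/// e.g. 'addseq' splits into 'add' + 's' (carry set) + 'eq' (predication).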
4197//
4198// FIXME: Would be nice to autogen this.
4199// FIXME: This is a bit of a maze of special cases.
4200StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4201                                      unsigned &PredicationCode,
4202                                      bool &CarrySetting,
4203                                      unsigned &ProcessorIMod,
4204                                      StringRef &ITMask) {
4205  PredicationCode = ARMCC::AL;
4206  CarrySetting = false;
4207  ProcessorIMod = 0;
4208
4209  // Ignore some mnemonics we know aren't predicated forms.
4210  //
4211  // FIXME: Would be nice to autogen this.
4212  if ((Mnemonic == "movs" && isThumb()) ||
4213      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4214      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4215      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4216      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4217      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4218      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4219      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
4220    return Mnemonic;
4221
4222  // First, split out any predication code. Ignore mnemonics we know aren't
4223  // predicated but do have a carry-set and so weren't caught above.
4224  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4225      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4226      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4227      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4228    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4229      .Case("eq", ARMCC::EQ)
4230      .Case("ne", ARMCC::NE)
4231      .Case("hs", ARMCC::HS)
4232      .Case("cs", ARMCC::HS)
4233      .Case("lo", ARMCC::LO)
4234      .Case("cc", ARMCC::LO)
4235      .Case("mi", ARMCC::MI)
4236      .Case("pl", ARMCC::PL)
4237      .Case("vs", ARMCC::VS)
4238      .Case("vc", ARMCC::VC)
4239      .Case("hi", ARMCC::HI)
4240      .Case("ls", ARMCC::LS)
4241      .Case("ge", ARMCC::GE)
4242      .Case("lt", ARMCC::LT)
4243      .Case("gt", ARMCC::GT)
4244      .Case("le", ARMCC::LE)
4245      .Case("al", ARMCC::AL)
4246      .Default(~0U);
4247    if (CC != ~0U) {
4248      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4249      PredicationCode = CC;
4250    }
4251  }
4252
4253  // Next, determine if we have a carry setting bit. We explicitly ignore all
4254  // the instructions we know end in 's'.
4255  if (Mnemonic.endswith("s") &&
4256      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4257        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4258        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4259        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4260        Mnemonic == "vrsqrts" || Mnemonic == "srs" || Mnemonic == "flds" ||
4261        Mnemonic == "fmrs" ||
4262        (Mnemonic == "movs" && isThumb()))) {
4263    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4264    CarrySetting = true;
4265  }
4266
4267  // The "cps" instruction can have a interrupt mode operand which is glued into
4268  // the mnemonic. Check if this is the case, split it and parse the imod op
4269  if (Mnemonic.startswith("cps")) {
4270    // Split out any imod code.
4271    unsigned IMod =
4272      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4273      .Case("ie", ARM_PROC::IE)
4274      .Case("id", ARM_PROC::ID)
4275      .Default(~0U);
4276    if (IMod != ~0U) {
4277      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4278      ProcessorIMod = IMod;
4279    }
4280  }
4281
4282  // The "it" instruction has the condition mask on the end of the mnemonic.
4283  if (Mnemonic.startswith("it")) {
4284    ITMask = Mnemonic.slice(2, Mnemonic.size());
4285    Mnemonic = Mnemonic.slice(0, 2);
4286  }
4287
4288  return Mnemonic;
4289}
4290
4291/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4292/// inclusion of carry set or predication code operands.
4293//
4294// FIXME: It would be nice to autogen this.
4295void ARMAsmParser::
4296getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4297                      bool &CanAcceptPredicationCode) {
4298  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4299      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4300      Mnemonic == "add" || Mnemonic == "adc" ||
4301      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4302      Mnemonic == "orr" || Mnemonic == "mvn" ||
4303      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4304      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4305      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4306                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4307                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4308    CanAcceptCarrySet = true;
4309  } else
4310    CanAcceptCarrySet = false;
4311
4312  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4313      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4314      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4315      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4316      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4317      (Mnemonic == "clrex" && !isThumb()) ||
4318      (Mnemonic == "nop" && isThumbOne()) ||
4319      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4320        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4321        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4322      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4323       !isThumb()) ||
4324      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4325    CanAcceptPredicationCode = false;
4326  } else
4327    CanAcceptPredicationCode = true;
4328
4329  if (isThumb()) {
4330    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4331        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4332      CanAcceptPredicationCode = false;
4333  }
4334}
4335
4336bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4337                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4338  // FIXME: This is all horribly hacky. We really need a better way to deal
4339  // with optional operands like this in the matcher table.
4340
4341  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4342  // another does not. Specifically, the MOVW instruction does not. So we
4343  // special case it here and remove the defaulted (non-setting) cc_out
4344  // operand if that's the instruction we're trying to match.
4345  //
4346  // We do this as post-processing of the explicit operands rather than just
4347  // conditionally adding the cc_out in the first place because we need
4348  // to check the type of the parsed immediate operand.
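  // For example, in ARM mode "mov r1, #0x1234" can only be a MOVW, since
  // 0x1234 is not an ARM modified immediate but does fit in 16 bits; in that
  // case we return true so the defaulted cc_out gets removed.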
4349  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4350      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4351      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4352      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4353    return true;
4354
4355  // Register-register 'add' for thumb does not have a cc_out operand
4356  // when there are only two register operands.
4357  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4358      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4359      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4360      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4361    return true;
4362  // Register-register 'add' for thumb does not have a cc_out operand
4363  // when it's an ADD Rdm, SP, {Rdm|#imm0_1020s4} instruction. We do
4364  // have to check the immediate range here since Thumb2 has a variant
4365  // that can handle a different range and has a cc_out operand.
4366  if (((isThumb() && Mnemonic == "add") ||
4367       (isThumbTwo() && Mnemonic == "sub")) &&
4368      Operands.size() == 6 &&
4369      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4370      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4371      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4372      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4373      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4374       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4375    return true;
4376  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4377  // imm0_4095 variant. That's the least-preferred variant when
4378  // selecting via the generic "add" mnemonic, so to know that we
4379  // should remove the cc_out operand, we have to explicitly check that
4380  // it's not one of the other variants. Ugh.
4381  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4382      Operands.size() == 6 &&
4383      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4384      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4385      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4386    // Nest conditions rather than one big 'if' statement for readability.
4387    //
4388    // If either register is a high reg, it's either one of the SP
4389    // variants (handled above) or a 32-bit encoding, so we just
4390    // check against T3.
4391    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4392         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4393        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4394      return false;
4395    // If both registers are low, we're in an IT block, and the immediate is
4396    // in range, we should use encoding T1 instead, which has a cc_out.
4397    if (inITBlock() &&
4398        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4399        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4400        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4401      return false;
4402
4403    // Otherwise, we use encoding T4, which does not have a cc_out
4404    // operand.
4405    return true;
4406  }
4407
4408  // The thumb2 multiply instruction doesn't have a CCOut register, so
4409  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4410  // use the 16-bit encoding or not.
4411  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4412      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4413      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4414      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4415      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4416      // If the registers aren't low regs, the destination reg isn't the
4417      // same as one of the source regs, or the cc_out operand is zero
4418      // outside of an IT block, we have to use the 32-bit encoding, so
4419      // remove the cc_out operand.
4420      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4421       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4422       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4423       !inITBlock() ||
4424       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4425        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4426        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4427        static_cast<ARMOperand*>(Operands[4])->getReg())))
4428    return true;
4429
4430  // Also check the 'mul' syntax variant that doesn't specify an explicit
4431  // destination register.
4432  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4433      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4434      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4435      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4436      // If the registers aren't low regs or the cc_out operand is zero
4437      // outside of an IT block, we have to use the 32-bit encoding, so
4438      // remove the cc_out operand.
4439      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4440       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4441       !inITBlock()))
4442    return true;
4443
4446  // Register-register 'add/sub' for thumb does not have a cc_out operand
4447  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4448  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4449  // right, this will result in better diagnostics (which operand is off)
4450  // anyway.
4451  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4452      (Operands.size() == 5 || Operands.size() == 6) &&
4453      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4454      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4455      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4456    return true;
4457
4458  return false;
4459}
4460
4461static bool isDataTypeToken(StringRef Tok) {
4462  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4463    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4464    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4465    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4466    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4467    Tok == ".f" || Tok == ".d";
4468}
4469
4470// FIXME: This bit should probably be handled via an explicit match class
4471// in the .td files that matches the suffix instead of having it be
4472// a literal string token the way it is now.
4473static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4474  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4475}
4476
4477/// Parse an ARM instruction mnemonic followed by its operands.
4478bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4479                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4480  // Create the leading tokens for the mnemonic, split by '.' characters.
4481  size_t Start = 0, Next = Name.find('.');
4482  StringRef Mnemonic = Name.slice(Start, Next);
4483
4484  // Split out the predication code and carry setting flag from the mnemonic.
4485  unsigned PredicationCode;
4486  unsigned ProcessorIMod;
4487  bool CarrySetting;
4488  StringRef ITMask;
4489  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4490                           ProcessorIMod, ITMask);
4491
4492  // In Thumb1, only the branch (B) instruction can be predicated.
4493  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4494    Parser.EatToEndOfStatement();
4495    return Error(NameLoc, "conditional execution not supported in Thumb1");
4496  }
4497
4498  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4499
4500  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4501  // is the mask as it will be for the IT encoding if the conditional
4502  // encoding has a '1' as its bit 0 (i.e. 't' ==> '1'). In the case
4503  // where the conditional bit0 is zero, the instruction post-processing
4504  // will adjust the mask accordingly.
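  // For example, "itte" yields Mask == 0b1010: bit 3 is 1 for the 't' of the
  // second instruction in the block, bit 2 is 0 for the 'e' of the third, and
  // the 1 in bit 1 marks the end of a three-instruction block.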
4505  if (Mnemonic == "it") {
4506    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4507    if (ITMask.size() > 3) {
4508      Parser.EatToEndOfStatement();
4509      return Error(Loc, "too many conditions on IT instruction");
4510    }
4511    unsigned Mask = 8;
4512    for (unsigned i = ITMask.size(); i != 0; --i) {
4513      char pos = ITMask[i - 1];
4514      if (pos != 't' && pos != 'e') {
4515        Parser.EatToEndOfStatement();
4516        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4517      }
4518      Mask >>= 1;
4519      if (ITMask[i - 1] == 't')
4520        Mask |= 8;
4521    }
4522    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4523  }
4524
4525  // FIXME: This is all a pretty gross hack. We should automatically handle
4526  // optional operands like this via tblgen.
4527
4528  // Next, add the CCOut and ConditionCode operands, if needed.
4529  //
4530  // For mnemonics which can ever incorporate a carry setting bit or predication
4531  // code, our matching model involves us always generating CCOut and
4532  // ConditionCode operands to match the mnemonic "as written" and then we let
4533  // the matcher deal with finding the right instruction or generating an
4534  // appropriate error.
4535  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4536  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4537
4538  // If we had a carry-setting suffix on an instruction that can't set
4539  // flags, issue an error.
4540  if (!CanAcceptCarrySet && CarrySetting) {
4541    Parser.EatToEndOfStatement();
4542    return Error(NameLoc, "instruction '" + Mnemonic +
4543                 "' can not set flags, but 's' suffix specified");
4544  }
4545  // If we had a predication code on an instruction that can't do that, issue an
4546  // error.
4547  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4548    Parser.EatToEndOfStatement();
4549    return Error(NameLoc, "instruction '" + Mnemonic +
4550                 "' is not predicable, but condition code specified");
4551  }
4552
4553  // Add the carry setting operand, if necessary.
4554  if (CanAcceptCarrySet) {
4555    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4556    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4557                                               Loc));
4558  }
4559
4560  // Add the predication code operand, if necessary.
4561  if (CanAcceptPredicationCode) {
4562    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4563                                      CarrySetting);
4564    Operands.push_back(ARMOperand::CreateCondCode(
4565                         ARMCC::CondCodes(PredicationCode), Loc));
4566  }
4567
4568  // Add the processor imod operand, if necessary.
4569  if (ProcessorIMod) {
4570    Operands.push_back(ARMOperand::CreateImm(
4571          MCConstantExpr::Create(ProcessorIMod, getContext()),
4572                                 NameLoc, NameLoc));
4573  }
4574
4575  // Add the remaining tokens in the mnemonic.
4576  while (Next != StringRef::npos) {
4577    Start = Next;
4578    Next = Name.find('.', Start + 1);
4579    StringRef ExtraToken = Name.slice(Start, Next);
4580
4581    // Some NEON instructions have an optional datatype suffix that is
4582    // completely ignored. Check for that.
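    // e.g. the ".32" of "vldmia.32" is dropped here, whereas the ".32" of
    // "vld1.32" is kept as an operand token. The ".n" width qualifier is
    // always dropped just below.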
4583    if (isDataTypeToken(ExtraToken) &&
4584        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4585      continue;
4586
4587    if (ExtraToken != ".n") {
4588      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4589      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4590    }
4591  }
4592
4593  // Read the remaining operands.
4594  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4595    // Read the first operand.
4596    if (parseOperand(Operands, Mnemonic)) {
4597      Parser.EatToEndOfStatement();
4598      return true;
4599    }
4600
4601    while (getLexer().is(AsmToken::Comma)) {
4602      Parser.Lex();  // Eat the comma.
4603
4604      // Parse and remember the operand.
4605      if (parseOperand(Operands, Mnemonic)) {
4606        Parser.EatToEndOfStatement();
4607        return true;
4608      }
4609    }
4610  }
4611
4612  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4613    SMLoc Loc = getLexer().getLoc();
4614    Parser.EatToEndOfStatement();
4615    return Error(Loc, "unexpected token in argument list");
4616  }
4617
4618  Parser.Lex(); // Consume the EndOfStatement
4619
4620  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4621  // do and don't have a cc_out optional-def operand. With some spot-checks
4622  // of the operand list, we can figure out which variant we're trying to
4623  // parse and adjust accordingly before actually matching. We shouldn't ever
4624  // try to remove a cc_out operand that was explicitly set on the
4625  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4626  // table driven matcher doesn't fit well with the ARM instruction set.
4627  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4628    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4629    Operands.erase(Operands.begin() + 1);
4630    delete Op;
4631  }
4632
4633  // ARM mode 'blx' needs special handling, as the register operand version
4634  // is predicable, but the label operand version is not. So, we can't rely
4635  // on the Mnemonic based checking to correctly figure out when to put
4636  // a k_CondCode operand in the list. If we're trying to match the label
4637  // version, remove the k_CondCode operand here.
4638  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4639      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4640    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4641    Operands.erase(Operands.begin() + 1);
4642    delete Op;
4643  }
4644
4645  // The vector-compare-to-zero instructions have a literal token "#0" at
4646  // the end that arrives here as an immediate operand. Convert it to a
4647  // token to play nicely with the matcher.
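  // For example, the trailing "#0" of "vceq.i32 d0, d1, #0" becomes a "#0"
  // token operand rather than an immediate; the same treatment is applied to
  // vcmp{e} and Thumb1 rsb below.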
4648  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4649      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4650      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4651    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4652    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4653    if (CE && CE->getValue() == 0) {
4654      Operands.erase(Operands.begin() + 5);
4655      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4656      delete Op;
4657    }
4658  }
4659  // VCMP{E} does the same thing, but with a different operand count.
4660  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4661      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4662    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4663    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4664    if (CE && CE->getValue() == 0) {
4665      Operands.erase(Operands.begin() + 4);
4666      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4667      delete Op;
4668    }
4669  }
4670  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4671  // end. Convert it to a token here.
4672  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4673      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4674    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4675    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4676    if (CE && CE->getValue() == 0) {
4677      Operands.erase(Operands.begin() + 5);
4678      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4679      delete Op;
4680    }
4681  }
4682
4683  return false;
4684}
4685
4686// Validate context-sensitive operand constraints.
4687
4688  // Return 'true' if the register list contains any register that is neither
4689  // a low GPR nor HiReg; 'false' otherwise. Set 'containsReg' to true if Reg
4690  // appears in the register list.
4691static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4692                                 unsigned HiReg, bool &containsReg) {
4693  containsReg = false;
4694  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4695    unsigned OpReg = Inst.getOperand(i).getReg();
4696    if (OpReg == Reg)
4697      containsReg = true;
4698    // Anything other than a low register isn't legal here.
4699    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4700      return true;
4701  }
4702  return false;
4703}
4704
4705  // Check if the specified register is in the register list of the inst,
4706// starting at the indicated operand number.
4707static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4708  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4709    unsigned OpReg = Inst.getOperand(i).getReg();
4710    if (OpReg == Reg)
4711      return true;
4712  }
4713  return false;
4714}
4715
4716// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4717// the ARMInsts array) instead. Getting that here requires awkward
4718// API changes, though. Better way?
4719namespace llvm {
4720extern const MCInstrDesc ARMInsts[];
4721}
4722static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4723  return ARMInsts[Opcode];
4724}
4725
4726// FIXME: We would really like to be able to tablegen'erate this.
4727bool ARMAsmParser::
4728validateInstruction(MCInst &Inst,
4729                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4730  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4731  SMLoc Loc = Operands[0]->getStartLoc();
4732  // Check the IT block state first.
4733  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4734  // being allowed in IT blocks, but not being predicable.  It just always
4735  // executes.
4736  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
4737    unsigned bit = 1;
4738    if (ITState.FirstCond)
4739      ITState.FirstCond = false;
4740    else
4741      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
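    // Note: forwardITPosition() has already stepped past the IT instruction
    // itself, so the first instruction of the block is covered by FirstCond
    // above and instruction N of the block (CurPosition == N) reads mask bit
    // (5 - N). For example, with mask 0b1010 ("itte") the second instruction
    // reads bit 3 ('t') and the third reads bit 2 ('e').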
4742    // The instruction must be predicable.
4743    if (!MCID.isPredicable())
4744      return Error(Loc, "instructions in IT block must be predicable");
4745    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
4746    unsigned ITCond = bit ? ITState.Cond :
4747      ARMCC::getOppositeCondition(ITState.Cond);
4748    if (Cond != ITCond) {
4749      // Find the condition code Operand to get its SMLoc information.
4750      SMLoc CondLoc;
4751      for (unsigned i = 1; i < Operands.size(); ++i)
4752        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
4753          CondLoc = Operands[i]->getStartLoc();
4754      return Error(CondLoc, "incorrect condition in IT block; got '" +
4755                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
4756                   "', but expected '" +
4757                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
4758    }
4759  // Check for non-'al' condition codes outside of the IT block.
4760  } else if (isThumbTwo() && MCID.isPredicable() &&
4761             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
4762             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
4763             Inst.getOpcode() != ARM::t2B)
4764    return Error(Loc, "predicated instructions must be in IT block");
4765
4766  switch (Inst.getOpcode()) {
4767  case ARM::LDRD:
4768  case ARM::LDRD_PRE:
4769  case ARM::LDRD_POST:
4770  case ARM::LDREXD: {
4771    // Rt2 must be Rt + 1.
4772    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4773    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4774    if (Rt2 != Rt + 1)
4775      return Error(Operands[3]->getStartLoc(),
4776                   "destination operands must be sequential");
4777    return false;
4778  }
4779  case ARM::STRD: {
4780    // Rt2 must be Rt + 1.
4781    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4782    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4783    if (Rt2 != Rt + 1)
4784      return Error(Operands[3]->getStartLoc(),
4785                   "source operands must be sequential");
4786    return false;
4787  }
4788  case ARM::STRD_PRE:
4789  case ARM::STRD_POST:
4790  case ARM::STREXD: {
4791    // Rt2 must be Rt + 1.
4792    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4793    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
4794    if (Rt2 != Rt + 1)
4795      return Error(Operands[3]->getStartLoc(),
4796                   "source operands must be sequential");
4797    return false;
4798  }
4799  case ARM::SBFX:
4800  case ARM::UBFX: {
4801    // width must be in range [1, 32-lsb]
4802    unsigned lsb = Inst.getOperand(2).getImm();
4803    unsigned widthm1 = Inst.getOperand(3).getImm();
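    // For example, "sbfx r0, r1, #28, #8" is rejected here: lsb 28 leaves
    // only 4 bits, but a width of 8 was requested.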
4804    if (widthm1 >= 32 - lsb)
4805      return Error(Operands[5]->getStartLoc(),
4806                   "bitfield width must be in range [1,32-lsb]");
4807    return false;
4808  }
4809  case ARM::tLDMIA: {
4810    // If we're parsing Thumb2, the .w variant is available and handles
4811    // most cases that are normally illegal for a Thumb1 LDM
4812    // instruction. We'll make the transformation in processInstruction()
4813    // if necessary.
4814    //
4815    // Thumb LDM instructions are writeback iff the base register is not
4816    // in the register list.
4817    unsigned Rn = Inst.getOperand(0).getReg();
4818    bool hasWritebackToken =
4819      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
4820       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
4821    bool listContainsBase;
4822    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
4823      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
4824                   "registers must be in range r0-r7");
4825    // If we should have writeback, then there should be a '!' token.
4826    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
4827      return Error(Operands[2]->getStartLoc(),
4828                   "writeback operator '!' expected");
4829    // If we should not have writeback, there must not be a '!'. This is
4830    // true even for the 32-bit wide encodings.
4831    if (listContainsBase && hasWritebackToken)
4832      return Error(Operands[3]->getStartLoc(),
4833                   "writeback operator '!' not allowed when base register "
4834                   "in register list");
4835
4836    break;
4837  }
4838  case ARM::t2LDMIA_UPD: {
4839    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
4840      return Error(Operands[4]->getStartLoc(),
4841                   "writeback operator '!' not allowed when base register "
4842                   "in register list");
4843    break;
4844  }
4845  // Like for ldm/stm, push and pop have a hi-reg handling version in Thumb2,
4846  // so only issue a diagnostic for Thumb1. The instructions will be
4847  // switched to the t2 encodings in processInstruction() if necessary.
4848  case ARM::tPOP: {
4849    bool listContainsBase;
4850    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
4851        !isThumbTwo())
4852      return Error(Operands[2]->getStartLoc(),
4853                   "registers must be in range r0-r7 or pc");
4854    break;
4855  }
4856  case ARM::tPUSH: {
4857    bool listContainsBase;
4858    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
4859        !isThumbTwo())
4860      return Error(Operands[2]->getStartLoc(),
4861                   "registers must be in range r0-r7 or lr");
4862    break;
4863  }
4864  case ARM::tSTMIA_UPD: {
4865    bool listContainsBase;
4866    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
4867      return Error(Operands[4]->getStartLoc(),
4868                   "registers must be in range r0-r7");
4869    break;
4870  }
4871  }
4872
4873  return false;
4874}
4875
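// Map the pseudo "...Asm..." VST1 lane opcodes, which differ only in the
// parsed data type suffix, onto the single real instruction for each element
// size.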
4876static unsigned getRealVSTLNOpcode(unsigned Opc) {
4877  switch(Opc) {
4878  default: llvm_unreachable("unexpected opcode!");
4879  case ARM::VST1LNdWB_fixed_Asm_8:   return ARM::VST1LNd8_UPD;
4880  case ARM::VST1LNdWB_fixed_Asm_P8:  return ARM::VST1LNd8_UPD;
4881  case ARM::VST1LNdWB_fixed_Asm_I8:  return ARM::VST1LNd8_UPD;
4882  case ARM::VST1LNdWB_fixed_Asm_S8:  return ARM::VST1LNd8_UPD;
4883  case ARM::VST1LNdWB_fixed_Asm_U8:  return ARM::VST1LNd8_UPD;
4884  case ARM::VST1LNdWB_fixed_Asm_16:  return ARM::VST1LNd16_UPD;
4885  case ARM::VST1LNdWB_fixed_Asm_P16: return ARM::VST1LNd16_UPD;
4886  case ARM::VST1LNdWB_fixed_Asm_I16: return ARM::VST1LNd16_UPD;
4887  case ARM::VST1LNdWB_fixed_Asm_S16: return ARM::VST1LNd16_UPD;
4888  case ARM::VST1LNdWB_fixed_Asm_U16: return ARM::VST1LNd16_UPD;
4889  case ARM::VST1LNdWB_fixed_Asm_32:  return ARM::VST1LNd32_UPD;
4890  case ARM::VST1LNdWB_fixed_Asm_F:   return ARM::VST1LNd32_UPD;
4891  case ARM::VST1LNdWB_fixed_Asm_F32: return ARM::VST1LNd32_UPD;
4892  case ARM::VST1LNdWB_fixed_Asm_I32: return ARM::VST1LNd32_UPD;
4893  case ARM::VST1LNdWB_fixed_Asm_S32: return ARM::VST1LNd32_UPD;
4894  case ARM::VST1LNdWB_fixed_Asm_U32: return ARM::VST1LNd32_UPD;
4895  case ARM::VST1LNdWB_register_Asm_8:   return ARM::VST1LNd8_UPD;
4896  case ARM::VST1LNdWB_register_Asm_P8:  return ARM::VST1LNd8_UPD;
4897  case ARM::VST1LNdWB_register_Asm_I8:  return ARM::VST1LNd8_UPD;
4898  case ARM::VST1LNdWB_register_Asm_S8:  return ARM::VST1LNd8_UPD;
4899  case ARM::VST1LNdWB_register_Asm_U8:  return ARM::VST1LNd8_UPD;
4900  case ARM::VST1LNdWB_register_Asm_16:  return ARM::VST1LNd16_UPD;
4901  case ARM::VST1LNdWB_register_Asm_P16: return ARM::VST1LNd16_UPD;
4902  case ARM::VST1LNdWB_register_Asm_I16: return ARM::VST1LNd16_UPD;
4903  case ARM::VST1LNdWB_register_Asm_S16: return ARM::VST1LNd16_UPD;
4904  case ARM::VST1LNdWB_register_Asm_U16: return ARM::VST1LNd16_UPD;
4905  case ARM::VST1LNdWB_register_Asm_32:  return ARM::VST1LNd32_UPD;
4906  case ARM::VST1LNdWB_register_Asm_F:   return ARM::VST1LNd32_UPD;
4907  case ARM::VST1LNdWB_register_Asm_F32: return ARM::VST1LNd32_UPD;
4908  case ARM::VST1LNdWB_register_Asm_I32: return ARM::VST1LNd32_UPD;
4909  case ARM::VST1LNdWB_register_Asm_S32: return ARM::VST1LNd32_UPD;
4910  case ARM::VST1LNdWB_register_Asm_U32: return ARM::VST1LNd32_UPD;
4911  case ARM::VST1LNdAsm_8:   return ARM::VST1LNd8;
4912  case ARM::VST1LNdAsm_P8:  return ARM::VST1LNd8;
4913  case ARM::VST1LNdAsm_I8:  return ARM::VST1LNd8;
4914  case ARM::VST1LNdAsm_S8:  return ARM::VST1LNd8;
4915  case ARM::VST1LNdAsm_U8:  return ARM::VST1LNd8;
4916  case ARM::VST1LNdAsm_16:  return ARM::VST1LNd16;
4917  case ARM::VST1LNdAsm_P16: return ARM::VST1LNd16;
4918  case ARM::VST1LNdAsm_I16: return ARM::VST1LNd16;
4919  case ARM::VST1LNdAsm_S16: return ARM::VST1LNd16;
4920  case ARM::VST1LNdAsm_U16: return ARM::VST1LNd16;
4921  case ARM::VST1LNdAsm_32:  return ARM::VST1LNd32;
4922  case ARM::VST1LNdAsm_F:   return ARM::VST1LNd32;
4923  case ARM::VST1LNdAsm_F32: return ARM::VST1LNd32;
4924  case ARM::VST1LNdAsm_I32: return ARM::VST1LNd32;
4925  case ARM::VST1LNdAsm_S32: return ARM::VST1LNd32;
4926  case ARM::VST1LNdAsm_U32: return ARM::VST1LNd32;
4927  }
4928}
4929
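// Same idea as above: collapse the VLD1 lane pseudo opcodes onto the real
// instruction for each element size.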
4930static unsigned getRealVLDLNOpcode(unsigned Opc) {
4931  switch(Opc) {
4932  default: llvm_unreachable("unexpected opcode!");
4933  case ARM::VLD1LNdWB_fixed_Asm_8:   return ARM::VLD1LNd8_UPD;
4934  case ARM::VLD1LNdWB_fixed_Asm_P8:  return ARM::VLD1LNd8_UPD;
4935  case ARM::VLD1LNdWB_fixed_Asm_I8:  return ARM::VLD1LNd8_UPD;
4936  case ARM::VLD1LNdWB_fixed_Asm_S8:  return ARM::VLD1LNd8_UPD;
4937  case ARM::VLD1LNdWB_fixed_Asm_U8:  return ARM::VLD1LNd8_UPD;
4938  case ARM::VLD1LNdWB_fixed_Asm_16:  return ARM::VLD1LNd16_UPD;
4939  case ARM::VLD1LNdWB_fixed_Asm_P16: return ARM::VLD1LNd16_UPD;
4940  case ARM::VLD1LNdWB_fixed_Asm_I16: return ARM::VLD1LNd16_UPD;
4941  case ARM::VLD1LNdWB_fixed_Asm_S16: return ARM::VLD1LNd16_UPD;
4942  case ARM::VLD1LNdWB_fixed_Asm_U16: return ARM::VLD1LNd16_UPD;
4943  case ARM::VLD1LNdWB_fixed_Asm_32:  return ARM::VLD1LNd32_UPD;
4944  case ARM::VLD1LNdWB_fixed_Asm_F:   return ARM::VLD1LNd32_UPD;
4945  case ARM::VLD1LNdWB_fixed_Asm_F32: return ARM::VLD1LNd32_UPD;
4946  case ARM::VLD1LNdWB_fixed_Asm_I32: return ARM::VLD1LNd32_UPD;
4947  case ARM::VLD1LNdWB_fixed_Asm_S32: return ARM::VLD1LNd32_UPD;
4948  case ARM::VLD1LNdWB_fixed_Asm_U32: return ARM::VLD1LNd32_UPD;
4949  case ARM::VLD1LNdWB_register_Asm_8:   return ARM::VLD1LNd8_UPD;
4950  case ARM::VLD1LNdWB_register_Asm_P8:  return ARM::VLD1LNd8_UPD;
4951  case ARM::VLD1LNdWB_register_Asm_I8:  return ARM::VLD1LNd8_UPD;
4952  case ARM::VLD1LNdWB_register_Asm_S8:  return ARM::VLD1LNd8_UPD;
4953  case ARM::VLD1LNdWB_register_Asm_U8:  return ARM::VLD1LNd8_UPD;
4954  case ARM::VLD1LNdWB_register_Asm_16:  return ARM::VLD1LNd16_UPD;
4955  case ARM::VLD1LNdWB_register_Asm_P16: return ARM::VLD1LNd16_UPD;
4956  case ARM::VLD1LNdWB_register_Asm_I16: return ARM::VLD1LNd16_UPD;
4957  case ARM::VLD1LNdWB_register_Asm_S16: return ARM::VLD1LNd16_UPD;
4958  case ARM::VLD1LNdWB_register_Asm_U16: return ARM::VLD1LNd16_UPD;
4959  case ARM::VLD1LNdWB_register_Asm_32:  return ARM::VLD1LNd32_UPD;
4960  case ARM::VLD1LNdWB_register_Asm_F:   return ARM::VLD1LNd32_UPD;
4961  case ARM::VLD1LNdWB_register_Asm_F32: return ARM::VLD1LNd32_UPD;
4962  case ARM::VLD1LNdWB_register_Asm_I32: return ARM::VLD1LNd32_UPD;
4963  case ARM::VLD1LNdWB_register_Asm_S32: return ARM::VLD1LNd32_UPD;
4964  case ARM::VLD1LNdWB_register_Asm_U32: return ARM::VLD1LNd32_UPD;
4965  case ARM::VLD1LNdAsm_8:   return ARM::VLD1LNd8;
4966  case ARM::VLD1LNdAsm_P8:  return ARM::VLD1LNd8;
4967  case ARM::VLD1LNdAsm_I8:  return ARM::VLD1LNd8;
4968  case ARM::VLD1LNdAsm_S8:  return ARM::VLD1LNd8;
4969  case ARM::VLD1LNdAsm_U8:  return ARM::VLD1LNd8;
4970  case ARM::VLD1LNdAsm_16:  return ARM::VLD1LNd16;
4971  case ARM::VLD1LNdAsm_P16: return ARM::VLD1LNd16;
4972  case ARM::VLD1LNdAsm_I16: return ARM::VLD1LNd16;
4973  case ARM::VLD1LNdAsm_S16: return ARM::VLD1LNd16;
4974  case ARM::VLD1LNdAsm_U16: return ARM::VLD1LNd16;
4975  case ARM::VLD1LNdAsm_32:  return ARM::VLD1LNd32;
4976  case ARM::VLD1LNdAsm_F:   return ARM::VLD1LNd32;
4977  case ARM::VLD1LNdAsm_F32: return ARM::VLD1LNd32;
4978  case ARM::VLD1LNdAsm_I32: return ARM::VLD1LNd32;
4979  case ARM::VLD1LNdAsm_S32: return ARM::VLD1LNd32;
4980  case ARM::VLD1LNdAsm_U32: return ARM::VLD1LNd32;
4981  }
4982}
4983
4984bool ARMAsmParser::
4985processInstruction(MCInst &Inst,
4986                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4987  switch (Inst.getOpcode()) {
4988  // Handle NEON VST1 complex aliases.
4989  case ARM::VST1LNdWB_register_Asm_8:
4990  case ARM::VST1LNdWB_register_Asm_P8:
4991  case ARM::VST1LNdWB_register_Asm_I8:
4992  case ARM::VST1LNdWB_register_Asm_S8:
4993  case ARM::VST1LNdWB_register_Asm_U8:
4994  case ARM::VST1LNdWB_register_Asm_16:
4995  case ARM::VST1LNdWB_register_Asm_P16:
4996  case ARM::VST1LNdWB_register_Asm_I16:
4997  case ARM::VST1LNdWB_register_Asm_S16:
4998  case ARM::VST1LNdWB_register_Asm_U16:
4999  case ARM::VST1LNdWB_register_Asm_32:
5000  case ARM::VST1LNdWB_register_Asm_F:
5001  case ARM::VST1LNdWB_register_Asm_F32:
5002  case ARM::VST1LNdWB_register_Asm_I32:
5003  case ARM::VST1LNdWB_register_Asm_S32:
5004  case ARM::VST1LNdWB_register_Asm_U32: {
5005    MCInst TmpInst;
5006    // Shuffle the operands around so the lane index operand is in the
5007    // right place.
5008    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5009    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5010    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5011    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5012    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5013    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5014    TmpInst.addOperand(Inst.getOperand(1)); // lane
5015    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5016    TmpInst.addOperand(Inst.getOperand(6));
5017    Inst = TmpInst;
5018    return true;
5019  }
5020  case ARM::VST1LNdWB_fixed_Asm_8:
5021  case ARM::VST1LNdWB_fixed_Asm_P8:
5022  case ARM::VST1LNdWB_fixed_Asm_I8:
5023  case ARM::VST1LNdWB_fixed_Asm_S8:
5024  case ARM::VST1LNdWB_fixed_Asm_U8:
5025  case ARM::VST1LNdWB_fixed_Asm_16:
5026  case ARM::VST1LNdWB_fixed_Asm_P16:
5027  case ARM::VST1LNdWB_fixed_Asm_I16:
5028  case ARM::VST1LNdWB_fixed_Asm_S16:
5029  case ARM::VST1LNdWB_fixed_Asm_U16:
5030  case ARM::VST1LNdWB_fixed_Asm_32:
5031  case ARM::VST1LNdWB_fixed_Asm_F:
5032  case ARM::VST1LNdWB_fixed_Asm_F32:
5033  case ARM::VST1LNdWB_fixed_Asm_I32:
5034  case ARM::VST1LNdWB_fixed_Asm_S32:
5035  case ARM::VST1LNdWB_fixed_Asm_U32: {
5036    MCInst TmpInst;
5037    // Shuffle the operands around so the lane index operand is in the
5038    // right place.
5039    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5040    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5041    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5042    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5043    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5044    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5045    TmpInst.addOperand(Inst.getOperand(1)); // lane
5046    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5047    TmpInst.addOperand(Inst.getOperand(5));
5048    Inst = TmpInst;
5049    return true;
5050  }
5051  case ARM::VST1LNdAsm_8:
5052  case ARM::VST1LNdAsm_P8:
5053  case ARM::VST1LNdAsm_I8:
5054  case ARM::VST1LNdAsm_S8:
5055  case ARM::VST1LNdAsm_U8:
5056  case ARM::VST1LNdAsm_16:
5057  case ARM::VST1LNdAsm_P16:
5058  case ARM::VST1LNdAsm_I16:
5059  case ARM::VST1LNdAsm_S16:
5060  case ARM::VST1LNdAsm_U16:
5061  case ARM::VST1LNdAsm_32:
5062  case ARM::VST1LNdAsm_F:
5063  case ARM::VST1LNdAsm_F32:
5064  case ARM::VST1LNdAsm_I32:
5065  case ARM::VST1LNdAsm_S32:
5066  case ARM::VST1LNdAsm_U32: {
5067    MCInst TmpInst;
5068    // Shuffle the operands around so the lane index operand is in the
5069    // right place.
5070    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
5071    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5072    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5073    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5074    TmpInst.addOperand(Inst.getOperand(1)); // lane
5075    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5076    TmpInst.addOperand(Inst.getOperand(5));
5077    Inst = TmpInst;
5078    return true;
5079  }
5080  // Handle NEON VLD1 complex aliases.
5081  case ARM::VLD1LNdWB_register_Asm_8:
5082  case ARM::VLD1LNdWB_register_Asm_P8:
5083  case ARM::VLD1LNdWB_register_Asm_I8:
5084  case ARM::VLD1LNdWB_register_Asm_S8:
5085  case ARM::VLD1LNdWB_register_Asm_U8:
5086  case ARM::VLD1LNdWB_register_Asm_16:
5087  case ARM::VLD1LNdWB_register_Asm_P16:
5088  case ARM::VLD1LNdWB_register_Asm_I16:
5089  case ARM::VLD1LNdWB_register_Asm_S16:
5090  case ARM::VLD1LNdWB_register_Asm_U16:
5091  case ARM::VLD1LNdWB_register_Asm_32:
5092  case ARM::VLD1LNdWB_register_Asm_F:
5093  case ARM::VLD1LNdWB_register_Asm_F32:
5094  case ARM::VLD1LNdWB_register_Asm_I32:
5095  case ARM::VLD1LNdWB_register_Asm_S32:
5096  case ARM::VLD1LNdWB_register_Asm_U32: {
5097    MCInst TmpInst;
5098    // Shuffle the operands around so the lane index operand is in the
5099    // right place.
5100    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5101    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5102    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5103    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5104    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5105    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5106    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5107    TmpInst.addOperand(Inst.getOperand(1)); // lane
5108    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5109    TmpInst.addOperand(Inst.getOperand(6));
5110    Inst = TmpInst;
5111    return true;
5112  }
5113  case ARM::VLD1LNdWB_fixed_Asm_8:
5114  case ARM::VLD1LNdWB_fixed_Asm_P8:
5115  case ARM::VLD1LNdWB_fixed_Asm_I8:
5116  case ARM::VLD1LNdWB_fixed_Asm_S8:
5117  case ARM::VLD1LNdWB_fixed_Asm_U8:
5118  case ARM::VLD1LNdWB_fixed_Asm_16:
5119  case ARM::VLD1LNdWB_fixed_Asm_P16:
5120  case ARM::VLD1LNdWB_fixed_Asm_I16:
5121  case ARM::VLD1LNdWB_fixed_Asm_S16:
5122  case ARM::VLD1LNdWB_fixed_Asm_U16:
5123  case ARM::VLD1LNdWB_fixed_Asm_32:
5124  case ARM::VLD1LNdWB_fixed_Asm_F:
5125  case ARM::VLD1LNdWB_fixed_Asm_F32:
5126  case ARM::VLD1LNdWB_fixed_Asm_I32:
5127  case ARM::VLD1LNdWB_fixed_Asm_S32:
5128  case ARM::VLD1LNdWB_fixed_Asm_U32: {
5129    MCInst TmpInst;
5130    // Shuffle the operands around so the lane index operand is in the
5131    // right place.
5132    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5133    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5134    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5135    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5136    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5137    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5138    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5139    TmpInst.addOperand(Inst.getOperand(1)); // lane
5140    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5141    TmpInst.addOperand(Inst.getOperand(5));
5142    Inst = TmpInst;
5143    return true;
5144  }
5145  case ARM::VLD1LNdAsm_8:
5146  case ARM::VLD1LNdAsm_P8:
5147  case ARM::VLD1LNdAsm_I8:
5148  case ARM::VLD1LNdAsm_S8:
5149  case ARM::VLD1LNdAsm_U8:
5150  case ARM::VLD1LNdAsm_16:
5151  case ARM::VLD1LNdAsm_P16:
5152  case ARM::VLD1LNdAsm_I16:
5153  case ARM::VLD1LNdAsm_S16:
5154  case ARM::VLD1LNdAsm_U16:
5155  case ARM::VLD1LNdAsm_32:
5156  case ARM::VLD1LNdAsm_F:
5157  case ARM::VLD1LNdAsm_F32:
5158  case ARM::VLD1LNdAsm_I32:
5159  case ARM::VLD1LNdAsm_S32:
5160  case ARM::VLD1LNdAsm_U32: {
5161    MCInst TmpInst;
5162    // Shuffle the operands around so the lane index operand is in the
5163    // right place.
5164    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5165    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5166    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5167    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5168    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5169    TmpInst.addOperand(Inst.getOperand(1)); // lane
5170    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5171    TmpInst.addOperand(Inst.getOperand(5));
5172    Inst = TmpInst;
5173    return true;
5174  }
5175  // Handle the MOV complex aliases.
5176  case ARM::ASRr:
5177  case ARM::LSRr:
5178  case ARM::LSLr:
5179  case ARM::RORr: {
5180    ARM_AM::ShiftOpc ShiftTy;
5181    switch(Inst.getOpcode()) {
5182    default: llvm_unreachable("unexpected opcode!");
5183    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5184    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5185    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5186    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5187    }
5188    // The shift amount comes from a register, so only the shift type matters.
5189    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5190    MCInst TmpInst;
5191    TmpInst.setOpcode(ARM::MOVsr);
5192    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5193    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5194    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5195    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5196    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5197    TmpInst.addOperand(Inst.getOperand(4));
5198    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5199    Inst = TmpInst;
5200    return true;
5201  }
5202  case ARM::ASRi:
5203  case ARM::LSRi:
5204  case ARM::LSLi:
5205  case ARM::RORi: {
5206    ARM_AM::ShiftOpc ShiftTy;
5207    switch(Inst.getOpcode()) {
5208    default: llvm_unreachable("unexpected opcode!");
5209    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5210    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5211    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5212    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5213    }
5214    // A shift by zero is a plain MOVr, not a MOVsi.
5215    unsigned Amt = Inst.getOperand(2).getImm();
5216    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5217    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5218    MCInst TmpInst;
5219    TmpInst.setOpcode(Opc);
5220    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5221    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5222    if (Opc == ARM::MOVsi)
5223      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5224    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5225    TmpInst.addOperand(Inst.getOperand(4));
5226    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5227    Inst = TmpInst;
5228    return true;
5229  }
5230  case ARM::RRXi: {
5231    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5232    MCInst TmpInst;
5233    TmpInst.setOpcode(ARM::MOVsi);
5234    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5235    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5236    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5237    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5238    TmpInst.addOperand(Inst.getOperand(3));
5239    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5240    Inst = TmpInst;
5241    return true;
5242  }
5243  case ARM::t2LDMIA_UPD: {
5244    // If this is a load of a single register, then we should use
5245    // a post-indexed LDR instruction instead, per the ARM ARM.
5246    if (Inst.getNumOperands() != 5)
5247      return false;
5248    MCInst TmpInst;
5249    TmpInst.setOpcode(ARM::t2LDR_POST);
5250    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5251    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5252    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5253    TmpInst.addOperand(MCOperand::CreateImm(4));
5254    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5255    TmpInst.addOperand(Inst.getOperand(3));
5256    Inst = TmpInst;
5257    return true;
5258  }
5259  case ARM::t2STMDB_UPD: {
5260    // If this is a store of a single register, then we should use
5261    // a pre-indexed STR instruction instead, per the ARM ARM.
5262    if (Inst.getNumOperands() != 5)
5263      return false;
5264    MCInst TmpInst;
5265    TmpInst.setOpcode(ARM::t2STR_PRE);
5266    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5267    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5268    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5269    TmpInst.addOperand(MCOperand::CreateImm(-4));
5270    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5271    TmpInst.addOperand(Inst.getOperand(3));
5272    Inst = TmpInst;
5273    return true;
5274  }
5275  case ARM::LDMIA_UPD:
5276    // If this is a load of a single register via a 'pop', then we should use
5277    // a post-indexed LDR instruction instead, per the ARM ARM.
5278    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5279        Inst.getNumOperands() == 5) {
5280      MCInst TmpInst;
5281      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5282      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5283      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5284      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5285      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5286      TmpInst.addOperand(MCOperand::CreateImm(4));
5287      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5288      TmpInst.addOperand(Inst.getOperand(3));
5289      Inst = TmpInst;
5290      return true;
5291    }
5292    break;
5293  case ARM::STMDB_UPD:
5294    // If this is a store of a single register via a 'push', then we should use
5295    // a pre-indexed STR instruction instead, per the ARM ARM.
5296    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5297        Inst.getNumOperands() == 5) {
5298      MCInst TmpInst;
5299      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5300      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5301      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5302      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5303      TmpInst.addOperand(MCOperand::CreateImm(-4));
5304      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5305      TmpInst.addOperand(Inst.getOperand(3));
5306      Inst = TmpInst;
5307    }
5308    break;
5309  case ARM::t2ADDri12:
5310    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5311    // mnemonic was used (not "addw"), encoding T3 is preferred.
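    // For example, "add r0, r1, #0xff00" is switched to t2ADDri here since
    // 0xff00 is a valid Thumb2 modified immediate, while
    // "addw r0, r1, #0xff00" keeps the 12-bit encoding.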
5312    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5313        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5314      break;
5315    Inst.setOpcode(ARM::t2ADDri);
5316    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5317    break;
5318  case ARM::t2SUBri12:
5319    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5320    // mnemonic was used (not "subw"), encoding T3 is preferred.
5321    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5322        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5323      break;
5324    Inst.setOpcode(ARM::t2SUBri);
5325    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5326    break;
5327  case ARM::tADDi8:
5328    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5329    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5330    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5331    // to encoding T1 if <Rd> is omitted."
5332    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5333      Inst.setOpcode(ARM::tADDi3);
5334      return true;
5335    }
5336    break;
5337  case ARM::tSUBi8:
5338    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5339    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5340    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5341    // to encoding T1 if <Rd> is omitted."
5342    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5343      Inst.setOpcode(ARM::tSUBi3);
5344      return true;
5345    }
5346    break;
5347  case ARM::t2ADDrr: {
5348    // If the destination and first source operand are the same, and
5349    // there's no setting of the flags, use encoding T2 instead of T3.
5350    // Note that this is only for ADD, not SUB. This mirrors the system
5351    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
5352    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5353        Inst.getOperand(5).getReg() != 0 ||
5354        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5355         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5356      break;
5357    MCInst TmpInst;
5358    TmpInst.setOpcode(ARM::tADDhirr);
5359    TmpInst.addOperand(Inst.getOperand(0));
5360    TmpInst.addOperand(Inst.getOperand(0));
5361    TmpInst.addOperand(Inst.getOperand(2));
5362    TmpInst.addOperand(Inst.getOperand(3));
5363    TmpInst.addOperand(Inst.getOperand(4));
5364    Inst = TmpInst;
5365    return true;
5366  }
5367  case ARM::tB:
5368    // A Thumb conditional branch outside of an IT block is a tBcc.
5369    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5370      Inst.setOpcode(ARM::tBcc);
5371      return true;
5372    }
5373    break;
5374  case ARM::t2B:
5375    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5376    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
5377      Inst.setOpcode(ARM::t2Bcc);
5378      return true;
5379    }
5380    break;
5381  case ARM::t2Bcc:
5382    // If the conditional is AL or we're in an IT block, we really want t2B.
5383    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5384      Inst.setOpcode(ARM::t2B);
5385      return true;
5386    }
5387    break;
5388  case ARM::tBcc:
5389    // If the conditional is AL, we really want tB.
5390    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5391      Inst.setOpcode(ARM::tB);
5392      return true;
5393    }
5394    break;
5395  case ARM::tLDMIA: {
5396    // If the register list contains any high registers, or if the writeback
5397    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
5398    // instead if we're in Thumb2. Otherwise, this should have generated
5399    // an error in validateInstruction().
5400    unsigned Rn = Inst.getOperand(0).getReg();
5401    bool hasWritebackToken =
5402      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5403       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5404    bool listContainsBase;
5405    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
5406        (!listContainsBase && !hasWritebackToken) ||
5407        (listContainsBase && hasWritebackToken)) {
5408      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5409      assert (isThumbTwo());
5410      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
5411      // If we're switching to the updating version, we need to insert
5412      // the writeback tied operand.
5413      if (hasWritebackToken)
5414        Inst.insert(Inst.begin(),
5415                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
5416      return true;
5417    }
5418    break;
5419  }
5420  case ARM::tSTMIA_UPD: {
5421    // If the register list contains any high registers, we need to use
5422    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5423    // should have generated an error in validateInstruction().
5424    unsigned Rn = Inst.getOperand(0).getReg();
5425    bool listContainsBase;
5426    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
5427      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5428      assert (isThumbTwo());
5429      Inst.setOpcode(ARM::t2STMIA_UPD);
5430      return true;
5431    }
5432    break;
5433  }
5434  case ARM::tPOP: {
5435    bool listContainsBase;
5436    // If the register list contains any high registers, we need to use
5437    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5438    // should have generated an error in validateInstruction().
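    // For example, "pop {r0, pc}" stays as tPOP, while "pop {r0, r8}" is
    // rewritten below into a t2LDMIA_UPD from SP with writeback.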
5439    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
5440      return false;
5441    assert (isThumbTwo());
5442    Inst.setOpcode(ARM::t2LDMIA_UPD);
5443    // Add the base register and writeback operands.
5444    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5445    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5446    return true;
5447  }
5448  case ARM::tPUSH: {
5449    bool listContainsBase;
5450    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
5451      return false;
5452    assert (isThumbTwo());
5453    Inst.setOpcode(ARM::t2STMDB_UPD);
5454    // Add the base register and writeback operands.
5455    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5456    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5457    return true;
5458  }
5459  case ARM::t2MOVi: {
5460    // If we can use the 16-bit encoding and the user didn't explicitly
5461    // request the 32-bit variant, transform it here.
5462    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5463        Inst.getOperand(1).getImm() <= 255 &&
5464        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
5465         Inst.getOperand(4).getReg() == ARM::CPSR) ||
5466        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
5467        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5468         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5469      // The operands aren't in the same order for tMOVi8...
5470      MCInst TmpInst;
5471      TmpInst.setOpcode(ARM::tMOVi8);
5472      TmpInst.addOperand(Inst.getOperand(0));
5473      TmpInst.addOperand(Inst.getOperand(4));
5474      TmpInst.addOperand(Inst.getOperand(1));
5475      TmpInst.addOperand(Inst.getOperand(2));
5476      TmpInst.addOperand(Inst.getOperand(3));
5477      Inst = TmpInst;
5478      return true;
5479    }
5480    break;
5481  }
5482  case ARM::t2MOVr: {
5483    // If we can use the 16-bit encoding and the user didn't explicitly
5484    // request the 32-bit variant, transform it here.
5485    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5486        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5487        Inst.getOperand(2).getImm() == ARMCC::AL &&
5488        Inst.getOperand(4).getReg() == ARM::CPSR &&
5489        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5490         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5491      // The operands aren't the same for tMOV[S]r... (no cc_out)
5492      MCInst TmpInst;
5493      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
5494      TmpInst.addOperand(Inst.getOperand(0));
5495      TmpInst.addOperand(Inst.getOperand(1));
5496      TmpInst.addOperand(Inst.getOperand(2));
5497      TmpInst.addOperand(Inst.getOperand(3));
5498      Inst = TmpInst;
5499      return true;
5500    }
5501    break;
5502  }
5503  case ARM::t2SXTH:
5504  case ARM::t2SXTB:
5505  case ARM::t2UXTH:
5506  case ARM::t2UXTB: {
5507    // If we can use the 16-bit encoding and the user didn't explicitly
5508    // request the 32-bit variant, transform it here.
5509    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5510        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5511        Inst.getOperand(2).getImm() == 0 &&
5512        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5513         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5514      unsigned NewOpc;
5515      switch (Inst.getOpcode()) {
5516      default: llvm_unreachable("Illegal opcode!");
5517      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
5518      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
5519      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
5520      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
5521      }
5522      // The operands aren't the same for thumb1 (no rotate operand).
5523      MCInst TmpInst;
5524      TmpInst.setOpcode(NewOpc);
5525      TmpInst.addOperand(Inst.getOperand(0));
5526      TmpInst.addOperand(Inst.getOperand(1));
5527      TmpInst.addOperand(Inst.getOperand(3));
5528      TmpInst.addOperand(Inst.getOperand(4));
5529      Inst = TmpInst;
5530      return true;
5531    }
5532    break;
5533  }
5534  case ARM::t2IT: {
5535    // In the encoded mask, the bit for each condition after the first means
5536    // 't' when it equals the low bit of the condition code, whereas in the
5537    // parsed mask a 1 always means 't'. XOR-toggle the bits if the low bit
5538    // of the condition code is zero. The encoding also expects the low bit
5539    // of the condition to be encoded as bit 4 of the mask operand, so mask
5540    // that in if needed.
5541    MCOperand &MO = Inst.getOperand(1);
5542    unsigned Mask = MO.getImm();
5543    unsigned OrigMask = Mask;
5544    unsigned TZ = CountTrailingZeros_32(Mask);
5545    if ((Inst.getOperand(0).getImm() & 1) == 0) {
5546      assert(Mask && TZ <= 3 && "illegal IT mask value!");
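      // Flip every 't'/'e' bit above the trailing terminator bit so the mask
      // matches the encoding's convention for this condition code.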
5547      for (unsigned i = 3; i != TZ; --i)
5548        Mask ^= 1 << i;
5549    } else
5550      Mask |= 0x10;
5551    MO.setImm(Mask);
5552
5553    // Set up the IT block state according to the IT instruction we just
5554    // matched.
5555    assert(!inITBlock() && "nested IT blocks?!");
5556    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
5557    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
5558    ITState.CurPosition = 0;
5559    ITState.FirstCond = true;
5560    break;
5561  }
5562  }
5563  return false;
5564}
5565
5566unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
5567  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
5568  // suffix depending on whether they're in an IT block or not.
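  // For example, outside an IT block "adds r0, r0, r1" can use the 16-bit
  // encoding while plain "add" cannot; inside an IT block the reverse holds.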
5569  unsigned Opc = Inst.getOpcode();
5570  const MCInstrDesc &MCID = getInstDesc(Opc);
5571  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
5572    assert(MCID.hasOptionalDef() &&
5573           "optionally flag setting instruction missing optional def operand");
5574    assert(MCID.NumOperands == Inst.getNumOperands() &&
5575           "operand count mismatch!");
5576    // Find the optional-def operand (cc_out).
5577    unsigned OpNo;
5578    for (OpNo = 0;
5579         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
5580         ++OpNo)
5581      ;
5582    // In Thumb1 only the flag-setting form exists, so reject anything else.
5583    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
5584      return Match_MnemonicFail;
5585    // If we're parsing Thumb2, which form is legal depends on whether we're
5586    // in an IT block.
5587    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
5588        !inITBlock())
5589      return Match_RequiresITBlock;
5590    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
5591        inITBlock())
5592      return Match_RequiresNotITBlock;
5593  }
5594  // Some Thumb1 encodings that support high registers only allow both
5595  // registers to be low (r0-r7) when assembling for Thumb2.
5596  else if (Opc == ARM::tADDhirr && isThumbOne() &&
5597           isARMLowRegister(Inst.getOperand(1).getReg()) &&
5598           isARMLowRegister(Inst.getOperand(2).getReg()))
5599    return Match_RequiresThumb2;
5600  // Others only require ARMv6 or later.
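  // For example, a non-flag-setting "mov" between two low registers is only
  // available as a Thumb encoding from ARMv6 onwards.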
5601  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
5602           isARMLowRegister(Inst.getOperand(0).getReg()) &&
5603           isARMLowRegister(Inst.getOperand(1).getReg()))
5604    return Match_RequiresV6;
5605  return Match_Success;
5606}
5607
5608bool ARMAsmParser::
5609MatchAndEmitInstruction(SMLoc IDLoc,
5610                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
5611                        MCStreamer &Out) {
5612  MCInst Inst;
5613  unsigned ErrorInfo;
5614  unsigned MatchResult;
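  // MatchInstructionImpl() is generated by TableGen from the target's
  // instruction definitions (see ARMGenAsmMatcher.inc, included below).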
5615  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
5616  switch (MatchResult) {
5617  default: break;
5618  case Match_Success:
5619    // Context sensitive operand constraints aren't handled by the matcher,
5620    // so check them here.
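    // (For example, that a predicated instruction's condition agrees with the
    // condition of the enclosing IT block.)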
5621    if (validateInstruction(Inst, Operands)) {
5622      // Still progress the IT block, otherwise one wrong condition causes
5623      // nasty cascading errors.
5624      forwardITPosition();
5625      return true;
5626    }
5627
5628    // Some instructions need post-processing to, for example, tweak which
5629    // encoding is selected. Loop on it while changes happen so the
5630    // individual transformations can chain off each other. E.g.,
5631    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8)
5632    while (processInstruction(Inst, Operands))
5633      ;
5634
5635    // Only move forward at the very end so that everything in validate
5636    // and process gets a consistent answer about whether we're in an IT
5637    // block.
5638    forwardITPosition();
5639
5640    Out.EmitInstruction(Inst);
5641    return false;
5642  case Match_MissingFeature:
5643    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
5644    return true;
5645  case Match_InvalidOperand: {
5646    SMLoc ErrorLoc = IDLoc;
5647    if (ErrorInfo != ~0U) {
5648      if (ErrorInfo >= Operands.size())
5649        return Error(IDLoc, "too few operands for instruction");
5650
5651      ErrorLoc = static_cast<ARMOperand*>(Operands[ErrorInfo])->getStartLoc();
5652      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
5653    }
5654
5655    return Error(ErrorLoc, "invalid operand for instruction");
5656  }
5657  case Match_MnemonicFail:
5658    return Error(IDLoc, "invalid instruction");
5659  case Match_ConversionFail:
5660    // The converter function will have already emitted a diagnostic.
5661    return true;
5662  case Match_RequiresNotITBlock:
5663    return Error(IDLoc, "flag setting instruction only valid outside IT block");
5664  case Match_RequiresITBlock:
5665    return Error(IDLoc, "instruction only valid inside IT block");
5666  case Match_RequiresV6:
5667    return Error(IDLoc, "instruction variant requires ARMv6 or later");
5668  case Match_RequiresThumb2:
5669    return Error(IDLoc, "instruction variant requires Thumb2");
5670  }
5671
5672  llvm_unreachable("Implement any new match types added!");
5673  return true;
5674}
5675
5676/// ParseDirective parses the ARM-specific directives.
5677bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
5678  StringRef IDVal = DirectiveID.getIdentifier();
5679  if (IDVal == ".word")
5680    return parseDirectiveWord(4, DirectiveID.getLoc());
5681  else if (IDVal == ".thumb")
5682    return parseDirectiveThumb(DirectiveID.getLoc());
5683  else if (IDVal == ".arm")
5684    return parseDirectiveARM(DirectiveID.getLoc());
5685  else if (IDVal == ".thumb_func")
5686    return parseDirectiveThumbFunc(DirectiveID.getLoc());
5687  else if (IDVal == ".code")
5688    return parseDirectiveCode(DirectiveID.getLoc());
5689  else if (IDVal == ".syntax")
5690    return parseDirectiveSyntax(DirectiveID.getLoc());
5691  return true;
5692}
5693
5694/// parseDirectiveWord
5695///  ::= .word [ expression (, expression)* ]
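///  e.g., ".word 0x11223344, somelabel+4" (somelabel is an arbitrary symbol).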
5696bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
5697  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5698    for (;;) {
5699      const MCExpr *Value;
5700      if (getParser().ParseExpression(Value))
5701        return true;
5702
5703      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
5704
5705      if (getLexer().is(AsmToken::EndOfStatement))
5706        break;
5707
5708      // FIXME: Improve diagnostic.
5709      if (getLexer().isNot(AsmToken::Comma))
5710        return Error(L, "unexpected token in directive");
5711      Parser.Lex();
5712    }
5713  }
5714
5715  Parser.Lex();
5716  return false;
5717}
5718
5719/// parseDirectiveThumb
5720///  ::= .thumb
5721bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
5722  if (getLexer().isNot(AsmToken::EndOfStatement))
5723    return Error(L, "unexpected token in directive");
5724  Parser.Lex();
5725
5726  if (!isThumb())
5727    SwitchMode();
5728  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
5729  return false;
5730}
5731
5732/// parseDirectiveARM
5733///  ::= .arm
5734bool ARMAsmParser::parseDirectiveARM(SMLoc L) {
5735  if (getLexer().isNot(AsmToken::EndOfStatement))
5736    return Error(L, "unexpected token in directive");
5737  Parser.Lex();
5738
5739  if (isThumb())
5740    SwitchMode();
5741  getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
5742  return false;
5743}
5744
5745/// parseDirectiveThumbFunc
5746///  ::= .thumb_func symbol_name
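///  e.g., ".thumb_func _foo" on Darwin ("_foo" being an arbitrary symbol);
///  on ELF the symbol name is instead taken from the following line.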
5747bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
5748  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
5749  bool isMachO = MAI.hasSubsectionsViaSymbols();
5750  StringRef Name;
5751
5752  // Darwin asm has the function name after the .thumb_func directive;
5753  // ELF doesn't.
5754  if (isMachO) {
5755    const AsmToken &Tok = Parser.getTok();
5756    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
5757      return Error(L, "unexpected token in .thumb_func directive");
5758    Name = Tok.getIdentifier();
5759    Parser.Lex(); // Consume the identifier token.
5760  }
5761
5762  if (getLexer().isNot(AsmToken::EndOfStatement))
5763    return Error(L, "unexpected token in directive");
5764  Parser.Lex();
5765
5766  // FIXME: assuming function name will be the line following .thumb_func
5767  if (!isMachO) {
5768    Name = Parser.getTok().getIdentifier();
5769  }
5770
5771  // Mark symbol as a thumb symbol.
5772  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
5773  getParser().getStreamer().EmitThumbFunc(Func);
5774  return false;
5775}
5776
5777/// parseDirectiveSyntax
5778///  ::= .syntax unified | divided
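///  Only "unified" (or "UNIFIED") is accepted; "divided" is rejected below.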
5779bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
5780  const AsmToken &Tok = Parser.getTok();
5781  if (Tok.isNot(AsmToken::Identifier))
5782    return Error(L, "unexpected token in .syntax directive");
5783  StringRef Mode = Tok.getString();
5784  if (Mode == "unified" || Mode == "UNIFIED")
5785    Parser.Lex();
5786  else if (Mode == "divided" || Mode == "DIVIDED")
5787    return Error(L, "'.syntax divided' arm assembly not supported");
5788  else
5789    return Error(L, "unrecognized syntax mode in .syntax directive");
5790
5791  if (getLexer().isNot(AsmToken::EndOfStatement))
5792    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
5793  Parser.Lex();
5794
5795  // TODO tell the MC streamer the mode
5796  // getParser().getStreamer().Emit???();
5797  return false;
5798}
5799
5800/// parseDirectiveCode
5801///  ::= .code 16 | 32
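///  ".code 16" switches the assembler to Thumb mode, ".code 32" to ARM mode.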
5802bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
5803  const AsmToken &Tok = Parser.getTok();
5804  if (Tok.isNot(AsmToken::Integer))
5805    return Error(L, "unexpected token in .code directive");
5806  int64_t Val = Parser.getTok().getIntVal();
5807  if (Val == 16)
5808    Parser.Lex();
5809  else if (Val == 32)
5810    Parser.Lex();
5811  else
5812    return Error(L, "invalid operand to .code directive");
5813
5814  if (getLexer().isNot(AsmToken::EndOfStatement))
5815    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
5816  Parser.Lex();
5817
5818  if (Val == 16) {
5819    if (!isThumb())
5820      SwitchMode();
5821    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
5822  } else {
5823    if (isThumb())
5824      SwitchMode();
5825    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
5826  }
5827
5828  return false;
5829}
5830
5831extern "C" void LLVMInitializeARMAsmLexer();
5832
5833/// Force static initialization.
5834extern "C" void LLVMInitializeARMAsmParser() {
5835  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
5836  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
5837  LLVMInitializeARMAsmLexer();
5838}
5839
5840#define GET_REGISTER_MATCHER
5841#define GET_MATCHER_IMPLEMENTATION
5842#include "ARMGenAsmMatcher.inc"
5843