ARMAsmParser.cpp revision 713c70238c6d150d2cd458b07ab35932fafe508e
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  struct {
49    ARMCC::CondCodes Cond;    // Condition for IT block.
50    unsigned Mask:4;          // Condition mask for instructions.
51                              // Starting at first 1 (from lsb).
52                              //   '1'  condition as indicated in IT.
53                              //   '0'  inverse of condition (else).
54                              // Count of instructions in IT block is
55                              // 4 - trailingzeroes(mask)
56
57    bool FirstCond;           // Explicit flag for when we're parsing the
58                              // First instruction in the IT block. It's
59                              // implied in the mask, so needs special
60                              // handling.
61
62    unsigned CurPosition;     // Current position in parsing of IT
63                              // block. In range [0,3]. Initialized
64                              // according to count of instructions in block.
65                              // ~0U if no active IT block.
66  } ITState;
67  bool inITBlock() { return ITState.CurPosition != ~0U;}
68  void forwardITPosition() {
69    if (!inITBlock()) return;
70    // Move to the next instruction in the IT block, if there is one. If not,
71    // mark the block as done.
72    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
73    if (++ITState.CurPosition == 5 - TZ)
74      ITState.CurPosition = ~0U; // Done with the IT block after this.
75  }
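  // Worked example of the bookkeeping above: Mask == 0b0100 describes a
  // 4 - trailingzeros(0b0100) == 2 instruction block, which forwardITPosition
  // retires once CurPosition reaches 5 - 2 == 3; Mask == 0b1000 is a
  // single-instruction block, retired when CurPosition reaches 2.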
76
77
78  MCAsmParser &getParser() const { return Parser; }
79  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
80
81  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
82  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
83
84  int tryParseRegister();
85  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
86  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
87  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
88  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
89  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
90  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
91  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
92                              unsigned &ShiftAmount);
93  bool parseDirectiveWord(unsigned Size, SMLoc L);
94  bool parseDirectiveThumb(SMLoc L);
95  bool parseDirectiveThumbFunc(SMLoc L);
96  bool parseDirectiveCode(SMLoc L);
97  bool parseDirectiveSyntax(SMLoc L);
98
99  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
100                          bool &CarrySetting, unsigned &ProcessorIMod,
101                          StringRef &ITMask);
102  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
103                             bool &CanAcceptPredicationCode);
104
105  bool isThumb() const {
106    // FIXME: Can tablegen auto-generate this?
107    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
108  }
109  bool isThumbOne() const {
110    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
111  }
112  bool isThumbTwo() const {
113    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
114  }
115  bool hasV6Ops() const {
116    return STI.getFeatureBits() & ARM::HasV6Ops;
117  }
118  bool hasV7Ops() const {
119    return STI.getFeatureBits() & ARM::HasV7Ops;
120  }
121  void SwitchMode() {
122    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
123    setAvailableFeatures(FB);
124  }
125  bool isMClass() const {
126    return STI.getFeatureBits() & ARM::FeatureMClass;
127  }
128
129  /// @name Auto-generated Match Functions
130  /// {
131
132#define GET_ASSEMBLER_HEADER
133#include "ARMGenAsmMatcher.inc"
134
135  /// }
136
137  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
138  OperandMatchResultTy parseCoprocNumOperand(
139    SmallVectorImpl<MCParsedAsmOperand*>&);
140  OperandMatchResultTy parseCoprocRegOperand(
141    SmallVectorImpl<MCParsedAsmOperand*>&);
142  OperandMatchResultTy parseCoprocOptionOperand(
143    SmallVectorImpl<MCParsedAsmOperand*>&);
144  OperandMatchResultTy parseMemBarrierOptOperand(
145    SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseProcIFlagsOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseMSRMaskOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
151                                   StringRef Op, int Low, int High);
152  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
153    return parsePKHImm(O, "lsl", 0, 31);
154  }
155  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
156    return parsePKHImm(O, "asr", 1, 32);
157  }
158  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
160  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
161  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
162  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
163  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
164  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
166  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
167
168  // Asm Match Converter Methods
169  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
170                    const SmallVectorImpl<MCParsedAsmOperand*> &);
171  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
172                    const SmallVectorImpl<MCParsedAsmOperand*> &);
173  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
174                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
175  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
176                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
177  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
178                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
180                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
188                             const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
190                             const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
192                             const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
194                             const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
196                  const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
198                  const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
200                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
202                        const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
204                     const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
206                        const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
208                     const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211
212  bool validateInstruction(MCInst &Inst,
213                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
214  bool processInstruction(MCInst &Inst,
215                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
216  bool shouldOmitCCOutOperand(StringRef Mnemonic,
217                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
218
219public:
220  enum ARMMatchResultTy {
221    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
222    Match_RequiresNotITBlock,
223    Match_RequiresV6,
224    Match_RequiresThumb2
225  };
226
227  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
228    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
229    MCAsmParserExtension::Initialize(_Parser);
230
231    // Initialize the set of available features.
232    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
233
234    // Not in an ITBlock to start with.
235    ITState.CurPosition = ~0U;
236  }
237
238  // Implementation of the MCTargetAsmParser interface:
239  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
240  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
241                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
242  bool ParseDirective(AsmToken DirectiveID);
243
244  unsigned checkTargetMatchPredicate(MCInst &Inst);
245
246  bool MatchAndEmitInstruction(SMLoc IDLoc,
247                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
248                               MCStreamer &Out);
249};
250} // end anonymous namespace
251
252namespace {
253
254/// ARMOperand - Instances of this class represent a parsed ARM machine
255/// instruction.
256class ARMOperand : public MCParsedAsmOperand {
257  enum KindTy {
258    k_CondCode,
259    k_CCOut,
260    k_ITCondMask,
261    k_CoprocNum,
262    k_CoprocReg,
263    k_CoprocOption,
264    k_Immediate,
265    k_FPImmediate,
266    k_MemBarrierOpt,
267    k_Memory,
268    k_PostIndexRegister,
269    k_MSRMask,
270    k_ProcIFlags,
271    k_VectorIndex,
272    k_Register,
273    k_RegisterList,
274    k_DPRRegisterList,
275    k_SPRRegisterList,
276    k_VectorList,
277    k_VectorListAllLanes,
278    k_VectorListIndexed,
279    k_ShiftedRegister,
280    k_ShiftedImmediate,
281    k_ShifterImmediate,
282    k_RotateImmediate,
283    k_BitfieldDescriptor,
284    k_Token
285  } Kind;
286
287  SMLoc StartLoc, EndLoc;
288  SmallVector<unsigned, 8> Registers;
289
290  union {
291    struct {
292      ARMCC::CondCodes Val;
293    } CC;
294
295    struct {
296      unsigned Val;
297    } Cop;
298
299    struct {
300      unsigned Val;
301    } CoprocOption;
302
303    struct {
304      unsigned Mask:4;
305    } ITMask;
306
307    struct {
308      ARM_MB::MemBOpt Val;
309    } MBOpt;
310
311    struct {
312      ARM_PROC::IFlags Val;
313    } IFlags;
314
315    struct {
316      unsigned Val;
317    } MMask;
318
319    struct {
320      const char *Data;
321      unsigned Length;
322    } Tok;
323
324    struct {
325      unsigned RegNum;
326    } Reg;
327
328    // A vector register list is a sequential list of 1 to 4 registers.
329    struct {
330      unsigned RegNum;
331      unsigned Count;
332      unsigned LaneIndex;
333    } VectorList;
334
335    struct {
336      unsigned Val;
337    } VectorIndex;
338
339    struct {
340      const MCExpr *Val;
341    } Imm;
342
343    struct {
344      unsigned Val;       // encoded 8-bit representation
345    } FPImm;
346
347    /// Combined record for all forms of ARM address expressions.
348    struct {
349      unsigned BaseRegNum;
350      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
351      // was specified.
352      const MCConstantExpr *OffsetImm;  // Offset immediate value
353      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
354      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
355      unsigned ShiftImm;        // shift for OffsetReg.
356      unsigned Alignment;       // 0 = no alignment specified
357                                // n = alignment in bytes (8, 16, or 32)
358      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
359    } Memory;
360
361    struct {
362      unsigned RegNum;
363      bool isAdd;
364      ARM_AM::ShiftOpc ShiftTy;
365      unsigned ShiftImm;
366    } PostIdxReg;
367
368    struct {
369      bool isASR;
370      unsigned Imm;
371    } ShifterImm;
372    struct {
373      ARM_AM::ShiftOpc ShiftTy;
374      unsigned SrcReg;
375      unsigned ShiftReg;
376      unsigned ShiftImm;
377    } RegShiftedReg;
378    struct {
379      ARM_AM::ShiftOpc ShiftTy;
380      unsigned SrcReg;
381      unsigned ShiftImm;
382    } RegShiftedImm;
383    struct {
384      unsigned Imm;
385    } RotImm;
386    struct {
387      unsigned LSB;
388      unsigned Width;
389    } Bitfield;
390  };
391
392  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
393public:
394  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
395    Kind = o.Kind;
396    StartLoc = o.StartLoc;
397    EndLoc = o.EndLoc;
398    switch (Kind) {
399    case k_CondCode:
400      CC = o.CC;
401      break;
402    case k_ITCondMask:
403      ITMask = o.ITMask;
404      break;
405    case k_Token:
406      Tok = o.Tok;
407      break;
408    case k_CCOut:
409    case k_Register:
410      Reg = o.Reg;
411      break;
412    case k_RegisterList:
413    case k_DPRRegisterList:
414    case k_SPRRegisterList:
415      Registers = o.Registers;
416      break;
417    case k_VectorList:
418    case k_VectorListAllLanes:
419    case k_VectorListIndexed:
420      VectorList = o.VectorList;
421      break;
422    case k_CoprocNum:
423    case k_CoprocReg:
424      Cop = o.Cop;
425      break;
426    case k_CoprocOption:
427      CoprocOption = o.CoprocOption;
428      break;
429    case k_Immediate:
430      Imm = o.Imm;
431      break;
432    case k_FPImmediate:
433      FPImm = o.FPImm;
434      break;
435    case k_MemBarrierOpt:
436      MBOpt = o.MBOpt;
437      break;
438    case k_Memory:
439      Memory = o.Memory;
440      break;
441    case k_PostIndexRegister:
442      PostIdxReg = o.PostIdxReg;
443      break;
444    case k_MSRMask:
445      MMask = o.MMask;
446      break;
447    case k_ProcIFlags:
448      IFlags = o.IFlags;
449      break;
450    case k_ShifterImmediate:
451      ShifterImm = o.ShifterImm;
452      break;
453    case k_ShiftedRegister:
454      RegShiftedReg = o.RegShiftedReg;
455      break;
456    case k_ShiftedImmediate:
457      RegShiftedImm = o.RegShiftedImm;
458      break;
459    case k_RotateImmediate:
460      RotImm = o.RotImm;
461      break;
462    case k_BitfieldDescriptor:
463      Bitfield = o.Bitfield;
464      break;
465    case k_VectorIndex:
466      VectorIndex = o.VectorIndex;
467      break;
468    }
469  }
470
471  /// getStartLoc - Get the location of the first token of this operand.
472  SMLoc getStartLoc() const { return StartLoc; }
473  /// getEndLoc - Get the location of the last token of this operand.
474  SMLoc getEndLoc() const { return EndLoc; }
475
476  ARMCC::CondCodes getCondCode() const {
477    assert(Kind == k_CondCode && "Invalid access!");
478    return CC.Val;
479  }
480
481  unsigned getCoproc() const {
482    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
483    return Cop.Val;
484  }
485
486  StringRef getToken() const {
487    assert(Kind == k_Token && "Invalid access!");
488    return StringRef(Tok.Data, Tok.Length);
489  }
490
491  unsigned getReg() const {
492    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
493    return Reg.RegNum;
494  }
495
496  const SmallVectorImpl<unsigned> &getRegList() const {
497    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
498            Kind == k_SPRRegisterList) && "Invalid access!");
499    return Registers;
500  }
501
502  const MCExpr *getImm() const {
503    assert(Kind == k_Immediate && "Invalid access!");
504    return Imm.Val;
505  }
506
507  unsigned getFPImm() const {
508    assert(Kind == k_FPImmediate && "Invalid access!");
509    return FPImm.Val;
510  }
511
512  unsigned getVectorIndex() const {
513    assert(Kind == k_VectorIndex && "Invalid access!");
514    return VectorIndex.Val;
515  }
516
517  ARM_MB::MemBOpt getMemBarrierOpt() const {
518    assert(Kind == k_MemBarrierOpt && "Invalid access!");
519    return MBOpt.Val;
520  }
521
522  ARM_PROC::IFlags getProcIFlags() const {
523    assert(Kind == k_ProcIFlags && "Invalid access!");
524    return IFlags.Val;
525  }
526
527  unsigned getMSRMask() const {
528    assert(Kind == k_MSRMask && "Invalid access!");
529    return MMask.Val;
530  }
531
532  bool isCoprocNum() const { return Kind == k_CoprocNum; }
533  bool isCoprocReg() const { return Kind == k_CoprocReg; }
534  bool isCoprocOption() const { return Kind == k_CoprocOption; }
535  bool isCondCode() const { return Kind == k_CondCode; }
536  bool isCCOut() const { return Kind == k_CCOut; }
537  bool isITMask() const { return Kind == k_ITCondMask; }
538  bool isITCondCode() const { return Kind == k_CondCode; }
539  bool isImm() const { return Kind == k_Immediate; }
540  bool isFPImm() const { return Kind == k_FPImmediate; }
541  bool isImm8s4() const {
542    if (Kind != k_Immediate)
543      return false;
544    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
545    if (!CE) return false;
546    int64_t Value = CE->getValue();
547    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
548  }
549  bool isImm0_1020s4() const {
550    if (Kind != k_Immediate)
551      return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
556  }
557  bool isImm0_508s4() const {
558    if (Kind != k_Immediate)
559      return false;
560    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
561    if (!CE) return false;
562    int64_t Value = CE->getValue();
563    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
564  }
565  bool isImm0_255() const {
566    if (Kind != k_Immediate)
567      return false;
568    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
569    if (!CE) return false;
570    int64_t Value = CE->getValue();
571    return Value >= 0 && Value < 256;
572  }
573  bool isImm0_1() const {
574    if (Kind != k_Immediate)
575      return false;
576    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
577    if (!CE) return false;
578    int64_t Value = CE->getValue();
579    return Value >= 0 && Value < 2;
580  }
581  bool isImm0_3() const {
582    if (Kind != k_Immediate)
583      return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = CE->getValue();
587    return Value >= 0 && Value < 4;
588  }
589  bool isImm0_7() const {
590    if (Kind != k_Immediate)
591      return false;
592    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
593    if (!CE) return false;
594    int64_t Value = CE->getValue();
595    return Value >= 0 && Value < 8;
596  }
597  bool isImm0_15() const {
598    if (Kind != k_Immediate)
599      return false;
600    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
601    if (!CE) return false;
602    int64_t Value = CE->getValue();
603    return Value >= 0 && Value < 16;
604  }
605  bool isImm0_31() const {
606    if (Kind != k_Immediate)
607      return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value >= 0 && Value < 32;
612  }
613  bool isImm1_16() const {
614    if (Kind != k_Immediate)
615      return false;
616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
617    if (!CE) return false;
618    int64_t Value = CE->getValue();
619    return Value > 0 && Value < 17;
620  }
621  bool isImm1_32() const {
622    if (Kind != k_Immediate)
623      return false;
624    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
625    if (!CE) return false;
626    int64_t Value = CE->getValue();
627    return Value > 0 && Value < 33;
628  }
629  bool isImm0_32() const {
630    if (Kind != k_Immediate)
631      return false;
632    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
633    if (!CE) return false;
634    int64_t Value = CE->getValue();
635    return Value >= 0 && Value < 33;
636  }
637  bool isImm0_65535() const {
638    if (Kind != k_Immediate)
639      return false;
640    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
641    if (!CE) return false;
642    int64_t Value = CE->getValue();
643    return Value >= 0 && Value < 65536;
644  }
645  bool isImm0_65535Expr() const {
646    if (Kind != k_Immediate)
647      return false;
648    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
649    // If it's not a constant expression, it'll generate a fixup and be
650    // handled later.
651    if (!CE) return true;
652    int64_t Value = CE->getValue();
653    return Value >= 0 && Value < 65536;
654  }
655  bool isImm24bit() const {
656    if (Kind != k_Immediate)
657      return false;
658    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
659    if (!CE) return false;
660    int64_t Value = CE->getValue();
661    return Value >= 0 && Value <= 0xffffff;
662  }
663  bool isImmThumbSR() const {
664    if (Kind != k_Immediate)
665      return false;
666    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
667    if (!CE) return false;
668    int64_t Value = CE->getValue();
669    return Value > 0 && Value < 33;
670  }
671  bool isPKHLSLImm() const {
672    if (Kind != k_Immediate)
673      return false;
674    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
675    if (!CE) return false;
676    int64_t Value = CE->getValue();
677    return Value >= 0 && Value < 32;
678  }
679  bool isPKHASRImm() const {
680    if (Kind != k_Immediate)
681      return false;
682    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683    if (!CE) return false;
684    int64_t Value = CE->getValue();
685    return Value > 0 && Value <= 32;
686  }
687  bool isARMSOImm() const {
688    if (Kind != k_Immediate)
689      return false;
690    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
691    if (!CE) return false;
692    int64_t Value = CE->getValue();
693    return ARM_AM::getSOImmVal(Value) != -1;
694  }
695  bool isARMSOImmNot() const {
696    if (Kind != k_Immediate)
697      return false;
698    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
699    if (!CE) return false;
700    int64_t Value = CE->getValue();
701    return ARM_AM::getSOImmVal(~Value) != -1;
702  }
703  bool isT2SOImm() const {
704    if (Kind != k_Immediate)
705      return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return ARM_AM::getT2SOImmVal(Value) != -1;
710  }
711  bool isT2SOImmNot() const {
712    if (Kind != k_Immediate)
713      return false;
714    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
715    if (!CE) return false;
716    int64_t Value = CE->getValue();
717    return ARM_AM::getT2SOImmVal(~Value) != -1;
718  }
719  bool isSetEndImm() const {
720    if (Kind != k_Immediate)
721      return false;
722    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
723    if (!CE) return false;
724    int64_t Value = CE->getValue();
725    return Value == 1 || Value == 0;
726  }
727  bool isReg() const { return Kind == k_Register; }
728  bool isRegList() const { return Kind == k_RegisterList; }
729  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
730  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
731  bool isToken() const { return Kind == k_Token; }
732  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
733  bool isMemory() const { return Kind == k_Memory; }
734  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
735  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
736  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
737  bool isRotImm() const { return Kind == k_RotateImmediate; }
738  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
739  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
740  bool isPostIdxReg() const {
741    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
742  }
743  bool isMemNoOffset(bool alignOK = false) const {
744    if (!isMemory())
745      return false;
746    // No offset of any kind.
747    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
748     (alignOK || Memory.Alignment == 0);
749  }
750  bool isAlignedMemory() const {
751    return isMemNoOffset(true);
752  }
753  bool isAddrMode2() const {
754    if (!isMemory() || Memory.Alignment != 0) return false;
755    // Check for register offset.
756    if (Memory.OffsetRegNum) return true;
757    // Immediate offset in range [-4095, 4095].
758    if (!Memory.OffsetImm) return true;
759    int64_t Val = Memory.OffsetImm->getValue();
760    return Val > -4096 && Val < 4096;
761  }
762  bool isAM2OffsetImm() const {
763    if (Kind != k_Immediate)
764      return false;
765    // Immediate offset in range [-4095, 4095].
766    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
767    if (!CE) return false;
768    int64_t Val = CE->getValue();
769    return Val > -4096 && Val < 4096;
770  }
771  bool isAddrMode3() const {
772    if (!isMemory() || Memory.Alignment != 0) return false;
773    // No shifts are legal for AM3.
774    if (Memory.ShiftType != ARM_AM::no_shift) return false;
775    // Check for register offset.
776    if (Memory.OffsetRegNum) return true;
777    // Immediate offset in range [-255, 255].
778    if (!Memory.OffsetImm) return true;
779    int64_t Val = Memory.OffsetImm->getValue();
780    return Val > -256 && Val < 256;
781  }
782  bool isAM3Offset() const {
783    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
784      return false;
785    if (Kind == k_PostIndexRegister)
786      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
787    // Immediate offset in range [-255, 255].
788    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
789    if (!CE) return false;
790    int64_t Val = CE->getValue();
791    // Special case, #-0 is INT32_MIN.
792    return (Val > -256 && Val < 256) || Val == INT32_MIN;
793  }
794  bool isAddrMode5() const {
795    // If we have an immediate that's not a constant, treat it as a label
796    // reference needing a fixup. If it is a constant, it's something else
797    // and we reject it.
798    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
799      return true;
800    if (!isMemory() || Memory.Alignment != 0) return false;
801    // Check for register offset.
802    if (Memory.OffsetRegNum) return false;
803    // Immediate offset in range [-1020, 1020] and a multiple of 4.
804    if (!Memory.OffsetImm) return true;
805    int64_t Val = Memory.OffsetImm->getValue();
806    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
807      Val == INT32_MIN;
808  }
809  bool isMemTBB() const {
810    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
811        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
812      return false;
813    return true;
814  }
815  bool isMemTBH() const {
816    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
817        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
818        Memory.Alignment != 0)
819      return false;
820    return true;
821  }
822  bool isMemRegOffset() const {
823    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
824      return false;
825    return true;
826  }
827  bool isT2MemRegOffset() const {
828    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
829        Memory.Alignment != 0)
830      return false;
831    // Only lsl #{0, 1, 2, 3} allowed.
832    if (Memory.ShiftType == ARM_AM::no_shift)
833      return true;
834    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
835      return false;
836    return true;
837  }
838  bool isMemThumbRR() const {
839    // Thumb reg+reg addressing is simple. Just two registers, a base and
840    // an offset. No shifts, negations or any other complicating factors.
841    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
842        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
843      return false;
844    return isARMLowRegister(Memory.BaseRegNum) &&
845      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
846  }
847  bool isMemThumbRIs4() const {
848    if (!isMemory() || Memory.OffsetRegNum != 0 ||
849        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
850      return false;
851    // Immediate offset, multiple of 4 in range [0, 124].
852    if (!Memory.OffsetImm) return true;
853    int64_t Val = Memory.OffsetImm->getValue();
854    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
855  }
856  bool isMemThumbRIs2() const {
857    if (!isMemory() || Memory.OffsetRegNum != 0 ||
858        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
859      return false;
860    // Immediate offset, multiple of 2 in range [0, 62].
861    if (!Memory.OffsetImm) return true;
862    int64_t Val = Memory.OffsetImm->getValue();
863    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
864  }
865  bool isMemThumbRIs1() const {
866    if (!isMemory() || Memory.OffsetRegNum != 0 ||
867        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
868      return false;
869    // Immediate offset in range [0, 31].
870    if (!Memory.OffsetImm) return true;
871    int64_t Val = Memory.OffsetImm->getValue();
872    return Val >= 0 && Val <= 31;
873  }
874  bool isMemThumbSPI() const {
875    if (!isMemory() || Memory.OffsetRegNum != 0 ||
876        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
877      return false;
878    // Immediate offset, multiple of 4 in range [0, 1020].
879    if (!Memory.OffsetImm) return true;
880    int64_t Val = Memory.OffsetImm->getValue();
881    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
882  }
883  bool isMemImm8s4Offset() const {
884    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
885      return false;
886    // Immediate offset a multiple of 4 in range [-1020, 1020].
887    if (!Memory.OffsetImm) return true;
888    int64_t Val = Memory.OffsetImm->getValue();
889    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
890  }
891  bool isMemImm0_1020s4Offset() const {
892    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
893      return false;
894    // Immediate offset a multiple of 4 in range [0, 1020].
895    if (!Memory.OffsetImm) return true;
896    int64_t Val = Memory.OffsetImm->getValue();
897    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
898  }
899  bool isMemImm8Offset() const {
900    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
901      return false;
902    // Immediate offset in range [-255, 255].
903    if (!Memory.OffsetImm) return true;
904    int64_t Val = Memory.OffsetImm->getValue();
905    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
906  }
907  bool isMemPosImm8Offset() const {
908    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
909      return false;
910    // Immediate offset in range [0, 255].
911    if (!Memory.OffsetImm) return true;
912    int64_t Val = Memory.OffsetImm->getValue();
913    return Val >= 0 && Val < 256;
914  }
915  bool isMemNegImm8Offset() const {
916    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
917      return false;
918    // Immediate offset in range [-255, -1].
919    if (!Memory.OffsetImm) return true;
920    int64_t Val = Memory.OffsetImm->getValue();
921    return Val > -256 && Val < 0;
922  }
923  bool isMemUImm12Offset() const {
924    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
925      return false;
926    // Immediate offset in range [0, 4095].
927    if (!Memory.OffsetImm) return true;
928    int64_t Val = Memory.OffsetImm->getValue();
929    return (Val >= 0 && Val < 4096);
930  }
931  bool isMemImm12Offset() const {
932    // If we have an immediate that's not a constant, treat it as a label
933    // reference needing a fixup. If it is a constant, it's something else
934    // and we reject it.
935    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
936      return true;
937
938    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
939      return false;
940    // Immediate offset in range [-4095, 4095].
941    if (!Memory.OffsetImm) return true;
942    int64_t Val = Memory.OffsetImm->getValue();
943    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
944  }
945  bool isPostIdxImm8() const {
946    if (Kind != k_Immediate)
947      return false;
948    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
949    if (!CE) return false;
950    int64_t Val = CE->getValue();
951    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
952  }
953  bool isPostIdxImm8s4() const {
954    if (Kind != k_Immediate)
955      return false;
956    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
957    if (!CE) return false;
958    int64_t Val = CE->getValue();
959    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
960      (Val == INT32_MIN);
961  }
962
963  bool isMSRMask() const { return Kind == k_MSRMask; }
964  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
965
966  // NEON operands.
967  bool isVecListOneD() const {
968    if (Kind != k_VectorList) return false;
969    return VectorList.Count == 1;
970  }
971
972  bool isVecListTwoD() const {
973    if (Kind != k_VectorList) return false;
974    return VectorList.Count == 2;
975  }
976
977  bool isVecListThreeD() const {
978    if (Kind != k_VectorList) return false;
979    return VectorList.Count == 3;
980  }
981
982  bool isVecListFourD() const {
983    if (Kind != k_VectorList) return false;
984    return VectorList.Count == 4;
985  }
986
987  bool isVecListTwoQ() const {
988    if (Kind != k_VectorList) return false;
989    //FIXME: We haven't taught the parser to handle by-two register lists
990    // yet, so don't pretend to know one.
991    return VectorList.Count == 2 && false;
992  }
993
994  bool isVecListOneDAllLanes() const {
995    if (Kind != k_VectorListAllLanes) return false;
996    return VectorList.Count == 1;
997  }
998
999  bool isVecListTwoDAllLanes() const {
1000    if (Kind != k_VectorListAllLanes) return false;
1001    return VectorList.Count == 2;
1002  }
1003
1004  bool isVecListOneDByteIndexed() const {
1005    if (Kind != k_VectorListIndexed) return false;
1006    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
1007  }
1008
1009  bool isVectorIndex8() const {
1010    if (Kind != k_VectorIndex) return false;
1011    return VectorIndex.Val < 8;
1012  }
1013  bool isVectorIndex16() const {
1014    if (Kind != k_VectorIndex) return false;
1015    return VectorIndex.Val < 4;
1016  }
1017  bool isVectorIndex32() const {
1018    if (Kind != k_VectorIndex) return false;
1019    return VectorIndex.Val < 2;
1020  }
1021
1022  bool isNEONi8splat() const {
1023    if (Kind != k_Immediate)
1024      return false;
1025    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1026    // Must be a constant.
1027    if (!CE) return false;
1028    int64_t Value = CE->getValue();
1029    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1030    // value.
1031    return Value >= 0 && Value < 256;
1032  }
1033
1034  bool isNEONi16splat() const {
1035    if (Kind != k_Immediate)
1036      return false;
1037    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1038    // Must be a constant.
1039    if (!CE) return false;
1040    int64_t Value = CE->getValue();
1041    // i16 value in the range [0,255] or [0x0100, 0xff00]
1042    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1043  }
1044
1045  bool isNEONi32splat() const {
1046    if (Kind != k_Immediate)
1047      return false;
1048    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1049    // Must be a constant.
1050    if (!CE) return false;
1051    int64_t Value = CE->getValue();
1052    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1053    return (Value >= 0 && Value < 256) ||
1054      (Value >= 0x0100 && Value <= 0xff00) ||
1055      (Value >= 0x010000 && Value <= 0xff0000) ||
1056      (Value >= 0x01000000 && Value <= 0xff000000);
1057  }
1058
1059  bool isNEONi32vmov() const {
1060    if (Kind != k_Immediate)
1061      return false;
1062    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1063    // Must be a constant.
1064    if (!CE) return false;
1065    int64_t Value = CE->getValue();
1066    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X,
1067    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
1068    return (Value >= 0 && Value < 256) ||
1069      (Value >= 0x0100 && Value <= 0xff00) ||
1070      (Value >= 0x010000 && Value <= 0xff0000) ||
1071      (Value >= 0x01000000 && Value <= 0xff000000) ||
1072      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1073      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1074  }
1075
1076  bool isNEONi64splat() const {
1077    if (Kind != k_Immediate)
1078      return false;
1079    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1080    // Must be a constant.
1081    if (!CE) return false;
1082    uint64_t Value = CE->getValue();
1083    // i64 value with each byte being either 0 or 0xff.
1084    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1085      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1086    return true;
1087  }
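  // For instance, 0x00ff00ff00ff00ff passes the check above (every byte is
  // 0x00 or 0xff), while 0x0000000000000001 fails because its low byte is
  // 0x01.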
1088
1089  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1090    // Add as immediates when possible.  Null MCExpr = 0.
1091    if (Expr == 0)
1092      Inst.addOperand(MCOperand::CreateImm(0));
1093    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1094      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1095    else
1096      Inst.addOperand(MCOperand::CreateExpr(Expr));
1097  }
1098
1099  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1100    assert(N == 2 && "Invalid number of operands!");
1101    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1102    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1103    Inst.addOperand(MCOperand::CreateReg(RegNum));
1104  }
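  // E.g. a "ne" predicate adds ARMCC::NE plus CPSR as the condition register,
  // while the default "al" predicate adds ARMCC::AL plus register 0, since it
  // does not read the flags.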
1105
1106  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1107    assert(N == 1 && "Invalid number of operands!");
1108    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1109  }
1110
1111  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1112    assert(N == 1 && "Invalid number of operands!");
1113    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1114  }
1115
1116  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1117    assert(N == 1 && "Invalid number of operands!");
1118    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1119  }
1120
1121  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1122    assert(N == 1 && "Invalid number of operands!");
1123    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1124  }
1125
1126  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1127    assert(N == 1 && "Invalid number of operands!");
1128    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1129  }
1130
1131  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1132    assert(N == 1 && "Invalid number of operands!");
1133    Inst.addOperand(MCOperand::CreateReg(getReg()));
1134  }
1135
1136  void addRegOperands(MCInst &Inst, unsigned N) const {
1137    assert(N == 1 && "Invalid number of operands!");
1138    Inst.addOperand(MCOperand::CreateReg(getReg()));
1139  }
1140
1141  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1142    assert(N == 3 && "Invalid number of operands!");
1143    assert(isRegShiftedReg() &&
1144           "addRegShiftedRegOperands() on non RegShiftedReg!");
1145    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1146    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1147    Inst.addOperand(MCOperand::CreateImm(
1148      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1149  }
1150
1151  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1152    assert(N == 2 && "Invalid number of operands!");
1153    assert(isRegShiftedImm() &&
1154           "addRegShiftedImmOperands() on non RegShiftedImm!");
1155    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1156    Inst.addOperand(MCOperand::CreateImm(
1157      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1158  }
1159
1160  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1161    assert(N == 1 && "Invalid number of operands!");
1162    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1163                                         ShifterImm.Imm));
1164  }
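  // E.g. an ASR shifter operand with Imm == 17 is emitted as
  // (1 << 5) | 17 == 0x31, while an LSL amount of 12 is emitted as just 12.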
1165
1166  void addRegListOperands(MCInst &Inst, unsigned N) const {
1167    assert(N == 1 && "Invalid number of operands!");
1168    const SmallVectorImpl<unsigned> &RegList = getRegList();
1169    for (SmallVectorImpl<unsigned>::const_iterator
1170           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1171      Inst.addOperand(MCOperand::CreateReg(*I));
1172  }
1173
1174  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1175    addRegListOperands(Inst, N);
1176  }
1177
1178  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1179    addRegListOperands(Inst, N);
1180  }
1181
1182  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1183    assert(N == 1 && "Invalid number of operands!");
1184    // Encoded as val>>3. The printer handles display as 8, 16, 24.
1185    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1186  }
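  // E.g. a rotation of 24 is emitted as 24 >> 3 == 3; since only rotations of
  // 0, 8, 16 and 24 are valid here, the low three bits carry no information.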
1187
1188  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1189    assert(N == 1 && "Invalid number of operands!");
1190    // Munge the lsb/width into a bitfield mask.
1191    unsigned lsb = Bitfield.LSB;
1192    unsigned width = Bitfield.Width;
1193    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
1194    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1195                      (32 - (lsb + width)));
1196    Inst.addOperand(MCOperand::CreateImm(Mask));
1197  }
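  // E.g. lsb == 8 and width == 8 produce Mask == 0xffff00ff: the referenced
  // bits [15:8] are clear and all other bits are set.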
1198
1199  void addImmOperands(MCInst &Inst, unsigned N) const {
1200    assert(N == 1 && "Invalid number of operands!");
1201    addExpr(Inst, getImm());
1202  }
1203
1204  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1205    assert(N == 1 && "Invalid number of operands!");
1206    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1207  }
1208
1209  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1210    assert(N == 1 && "Invalid number of operands!");
1211    // FIXME: We really want to scale the value here, but the LDRD/STRD
1212    // instructions don't encode operands that way yet.
1213    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1214    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1215  }
1216
1217  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1218    assert(N == 1 && "Invalid number of operands!");
1219    // The immediate is scaled by four in the encoding and is stored
1220    // in the MCInst as such. Lop off the low two bits here.
1221    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1222    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1223  }
1224
1225  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1226    assert(N == 1 && "Invalid number of operands!");
1227    // The immediate is scaled by four in the encoding and is stored
1228    // in the MCInst as such. Lop off the low two bits here.
1229    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1230    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1231  }
1232
1233  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1234    assert(N == 1 && "Invalid number of operands!");
1235    // The constant encodes as the immediate-1, and we store in the instruction
1236    // the bits as encoded, so subtract off one here.
1237    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1238    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1239  }
1240
1241  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1242    assert(N == 1 && "Invalid number of operands!");
1243    // The constant encodes as the immediate-1, and we store in the instruction
1244    // the bits as encoded, so subtract off one here.
1245    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1246    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1247  }
1248
1249  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1250    assert(N == 1 && "Invalid number of operands!");
1251    // The constant encodes as the immediate, except for 32, which encodes as
1252    // zero.
1253    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1254    unsigned Imm = CE->getValue();
1255    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1256  }
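  // E.g. "#32" is emitted as 0 and "#17" as 17, matching the Thumb
  // shift-amount encoding in which 0 denotes a shift of 32.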
1257
1258  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1259    assert(N == 1 && "Invalid number of operands!");
1260    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1261    // the instruction as well.
1262    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1263    int Val = CE->getValue();
1264    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1265  }
1266
1267  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1268    assert(N == 1 && "Invalid number of operands!");
1269    // The operand is actually a t2_so_imm, but we have its bitwise
1270    // negation in the assembly source, so twiddle it here.
1271    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1272    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1273  }
1274
1275  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1276    assert(N == 1 && "Invalid number of operands!");
1277    // The operand is actually a so_imm, but we have its bitwise
1278    // negation in the assembly source, so twiddle it here.
1279    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1280    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1281  }
1282
1283  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1284    assert(N == 1 && "Invalid number of operands!");
1285    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1286  }
1287
1288  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1289    assert(N == 1 && "Invalid number of operands!");
1290    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1291  }
1292
1293  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1294    assert(N == 2 && "Invalid number of operands!");
1295    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1296    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1297  }
1298
1299  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1300    assert(N == 3 && "Invalid number of operands!");
1301    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1302    if (!Memory.OffsetRegNum) {
1303      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1304      // Special case for #-0
1305      if (Val == INT32_MIN) Val = 0;
1306      if (Val < 0) Val = -Val;
1307      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1308    } else {
1309      // For register offset, we encode the shift type and negation flag
1310      // here.
1311      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1312                              Memory.ShiftImm, Memory.ShiftType);
1313    }
1314    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1315    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1316    Inst.addOperand(MCOperand::CreateImm(Val));
1317  }
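  // E.g. an immediate offset of -8 becomes AddSub == ARM_AM::sub with a
  // magnitude of 8, and "#-0" (represented as INT32_MIN) is normalized to a
  // subtract of 0 so the minus sign is still recorded.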
1318
1319  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1320    assert(N == 2 && "Invalid number of operands!");
1321    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1322    assert(CE && "non-constant AM2OffsetImm operand!");
1323    int32_t Val = CE->getValue();
1324    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1325    // Special case for #-0
1326    if (Val == INT32_MIN) Val = 0;
1327    if (Val < 0) Val = -Val;
1328    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1329    Inst.addOperand(MCOperand::CreateReg(0));
1330    Inst.addOperand(MCOperand::CreateImm(Val));
1331  }
1332
1333  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1334    assert(N == 3 && "Invalid number of operands!");
1335    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1336    if (!Memory.OffsetRegNum) {
1337      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1338      // Special case for #-0
1339      if (Val == INT32_MIN) Val = 0;
1340      if (Val < 0) Val = -Val;
1341      Val = ARM_AM::getAM3Opc(AddSub, Val);
1342    } else {
1343      // For register offset, we encode the shift type and negation flag
1344      // here.
1345      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1346    }
1347    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1348    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1349    Inst.addOperand(MCOperand::CreateImm(Val));
1350  }
1351
1352  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1353    assert(N == 2 && "Invalid number of operands!");
1354    if (Kind == k_PostIndexRegister) {
1355      int32_t Val =
1356        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1357      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1358      Inst.addOperand(MCOperand::CreateImm(Val));
1359      return;
1360    }
1361
1362    // Constant offset.
1363    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1364    int32_t Val = CE->getValue();
1365    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1366    // Special case for #-0
1367    if (Val == INT32_MIN) Val = 0;
1368    if (Val < 0) Val = -Val;
1369    Val = ARM_AM::getAM3Opc(AddSub, Val);
1370    Inst.addOperand(MCOperand::CreateReg(0));
1371    Inst.addOperand(MCOperand::CreateImm(Val));
1372  }
1373
1374  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1375    assert(N == 2 && "Invalid number of operands!");
1376    // If we have an immediate that's not a constant, treat it as a label
1377    // reference needing a fixup. If it is a constant, it's something else
1378    // and we reject it.
1379    if (isImm()) {
1380      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1381      Inst.addOperand(MCOperand::CreateImm(0));
1382      return;
1383    }
1384
1385    // The lower two bits are always zero and as such are not encoded.
1386    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1387    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1388    // Special case for #-0
1389    if (Val == INT32_MIN) Val = 0;
1390    if (Val < 0) Val = -Val;
1391    Val = ARM_AM::getAM5Opc(AddSub, Val);
1392    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1393    Inst.addOperand(MCOperand::CreateImm(Val));
1394  }
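  // E.g. a constant offset of -1020 scales to 255 words and is emitted as
  // ARM_AM::getAM5Opc(ARM_AM::sub, 255), while a non-constant immediate is
  // emitted as an expression plus a zero offset for a later fixup.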
1395
1396  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1397    assert(N == 2 && "Invalid number of operands!");
1398    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1399    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1400    Inst.addOperand(MCOperand::CreateImm(Val));
1401  }
1402
1403  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1404    assert(N == 2 && "Invalid number of operands!");
1405    // The lower two bits are always zero and as such are not encoded.
1406    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1407    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1408    Inst.addOperand(MCOperand::CreateImm(Val));
1409  }
1410
1411  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1412    assert(N == 2 && "Invalid number of operands!");
1413    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1414    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1415    Inst.addOperand(MCOperand::CreateImm(Val));
1416  }
1417
1418  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1419    addMemImm8OffsetOperands(Inst, N);
1420  }
1421
1422  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1423    addMemImm8OffsetOperands(Inst, N);
1424  }
1425
1426  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1427    assert(N == 2 && "Invalid number of operands!");
1428    // If this is an immediate, it's a label reference.
1429    if (Kind == k_Immediate) {
1430      addExpr(Inst, getImm());
1431      Inst.addOperand(MCOperand::CreateImm(0));
1432      return;
1433    }
1434
1435    // Otherwise, it's a normal memory reg+offset.
1436    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1437    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1438    Inst.addOperand(MCOperand::CreateImm(Val));
1439  }
1440
1441  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1442    assert(N == 2 && "Invalid number of operands!");
1443    // If this is an immediate, it's a label reference.
1444    if (Kind == k_Immediate) {
1445      addExpr(Inst, getImm());
1446      Inst.addOperand(MCOperand::CreateImm(0));
1447      return;
1448    }
1449
1450    // Otherwise, it's a normal memory reg+offset.
1451    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1452    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1453    Inst.addOperand(MCOperand::CreateImm(Val));
1454  }
1455
1456  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1457    assert(N == 2 && "Invalid number of operands!");
1458    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1459    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1460  }
1461
1462  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1463    assert(N == 2 && "Invalid number of operands!");
1464    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1465    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1466  }
1467
1468  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1469    assert(N == 3 && "Invalid number of operands!");
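    // getAM2Opc folds the add/sub direction, shift amount, and shift type into
    // one immediate; the base and offset registers are passed through as-is.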
1470    unsigned Val =
1471      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1472                        Memory.ShiftImm, Memory.ShiftType);
1473    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1474    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1475    Inst.addOperand(MCOperand::CreateImm(Val));
1476  }
1477
1478  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1479    assert(N == 3 && "Invalid number of operands!");
1480    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1481    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1482    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1483  }
1484
1485  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1486    assert(N == 2 && "Invalid number of operands!");
1487    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1488    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1489  }
1490
1491  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1492    assert(N == 2 && "Invalid number of operands!");
1493    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1494    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1495    Inst.addOperand(MCOperand::CreateImm(Val));
1496  }
1497
1498  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1499    assert(N == 2 && "Invalid number of operands!");
1500    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1501    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1502    Inst.addOperand(MCOperand::CreateImm(Val));
1503  }
1504
1505  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1506    assert(N == 2 && "Invalid number of operands!");
1507    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1508    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1509    Inst.addOperand(MCOperand::CreateImm(Val));
1510  }
1511
1512  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1513    assert(N == 2 && "Invalid number of operands!");
1514    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1515    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1516    Inst.addOperand(MCOperand::CreateImm(Val));
1517  }
1518
1519  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1520    assert(N == 1 && "Invalid number of operands!");
1521    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1522    assert(CE && "non-constant post-idx-imm8 operand!");
1523    int Imm = CE->getValue();
1524    bool isAdd = Imm >= 0;
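    // INT32_MIN is used as the marker for #-0 (as elsewhere in this file), so
    // it folds to a subtract of zero. Otherwise the magnitude lands in the low
    // bits with the add/sub sense in bit 8; e.g. #4 -> 0x104, #-4 -> 0x004.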
1525    if (Imm == INT32_MIN) Imm = 0;
1526    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
1527    Inst.addOperand(MCOperand::CreateImm(Imm));
1528  }
1529
1530  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1531    assert(N == 1 && "Invalid number of operands!");
1532    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1533    assert(CE && "non-constant post-idx-imm8s4 operand!");
1534    int Imm = CE->getValue();
1535    bool isAdd = Imm >= 0;
1536    if (Imm == INT32_MIN) Imm = 0;
1537    // Immediate is scaled by 4.
1538    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1539    Inst.addOperand(MCOperand::CreateImm(Imm));
1540  }
1541
1542  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1543    assert(N == 2 && "Invalid number of operands!");
1544    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1545    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1546  }
1547
1548  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1549    assert(N == 2 && "Invalid number of operands!");
1550    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1551    // The sign, shift type, and shift amount are encoded in a single operand
1552    // using the AM2 encoding helpers.
1553    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1554    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1555                                     PostIdxReg.ShiftTy);
1556    Inst.addOperand(MCOperand::CreateImm(Imm));
1557  }
1558
1559  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1560    assert(N == 1 && "Invalid number of operands!");
1561    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1562  }
1563
1564  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1565    assert(N == 1 && "Invalid number of operands!");
1566    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1567  }
1568
1569  void addVecListOperands(MCInst &Inst, unsigned N) const {
1570    assert(N == 1 && "Invalid number of operands!");
1571    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1572  }
1573
1574  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1575    assert(N == 2 && "Invalid number of operands!");
1576    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1577    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1578  }
1579
1580  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1581    assert(N == 1 && "Invalid number of operands!");
1582    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1583  }
1584
1585  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1586    assert(N == 1 && "Invalid number of operands!");
1587    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1588  }
1589
1590  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1591    assert(N == 1 && "Invalid number of operands!");
1592    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1593  }
1594
1595  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1596    assert(N == 1 && "Invalid number of operands!");
1597    // The immediate encodes the type of constant as well as the value.
1598    // Mask in that this is an i8 splat.
1599    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1600    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1601  }
1602
1603  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1604    assert(N == 1 && "Invalid number of operands!");
1605    // The immediate encodes the type of constant as well as the value.
1606    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1607    unsigned Value = CE->getValue();
1608    if (Value >= 256)
1609      Value = (Value >> 8) | 0xa00;
1610    else
1611      Value |= 0x800;
1612    Inst.addOperand(MCOperand::CreateImm(Value));
1613  }
1614
1615  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1616    assert(N == 1 && "Invalid number of operands!");
1617    // The immediate encodes the type of constant as well as the value.
1618    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1619    unsigned Value = CE->getValue();
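    // Shift the constant down to the byte it occupies and OR in the bits that
    // select that lane; e.g. #0x1200 becomes 0x12 | 0x200 (the ORed-in values
    // roughly mirror the NEON modified-immediate cmode selection).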
1620    if (Value >= 256 && Value <= 0xff00)
1621      Value = (Value >> 8) | 0x200;
1622    else if (Value > 0xffff && Value <= 0xff0000)
1623      Value = (Value >> 16) | 0x400;
1624    else if (Value > 0xffffff)
1625      Value = (Value >> 24) | 0x600;
1626    Inst.addOperand(MCOperand::CreateImm(Value));
1627  }
1628
1629  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1630    assert(N == 1 && "Invalid number of operands!");
1631    // The immediate encodes the type of constant as well as the value.
1632    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1633    unsigned Value = CE->getValue();
1634    if (Value >= 256 && Value <= 0xffff)
1635      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1636    else if (Value > 0xffff && Value <= 0xffffff)
1637      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1638    else if (Value > 0xffffff)
1639      Value = (Value >> 24) | 0x600;
1640    Inst.addOperand(MCOperand::CreateImm(Value));
1641  }
1642
1643  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1644    assert(N == 1 && "Invalid number of operands!");
1645    // The immediate encodes the type of constant as well as the value.
1646    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1647    uint64_t Value = CE->getValue();
1648    unsigned Imm = 0;
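    // Each byte of the value is expected (per the corresponding is*() check)
    // to be either 0x00 or 0xff, so one bit per byte reconstructs it; e.g.
    // 0x00ff00ff00ff00ff folds to 0x55, yielding the operand 0x1e55.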
1649    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1650      Imm |= (Value & 1) << i;
1651    }
1652    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1653  }
1654
1655  virtual void print(raw_ostream &OS) const;
1656
1657  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1658    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1659    Op->ITMask.Mask = Mask;
1660    Op->StartLoc = S;
1661    Op->EndLoc = S;
1662    return Op;
1663  }
1664
1665  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1666    ARMOperand *Op = new ARMOperand(k_CondCode);
1667    Op->CC.Val = CC;
1668    Op->StartLoc = S;
1669    Op->EndLoc = S;
1670    return Op;
1671  }
1672
1673  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1674    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1675    Op->Cop.Val = CopVal;
1676    Op->StartLoc = S;
1677    Op->EndLoc = S;
1678    return Op;
1679  }
1680
1681  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1682    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1683    Op->Cop.Val = CopVal;
1684    Op->StartLoc = S;
1685    Op->EndLoc = S;
1686    return Op;
1687  }
1688
1689  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1690    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1691    Op->Cop.Val = Val;
1692    Op->StartLoc = S;
1693    Op->EndLoc = E;
1694    return Op;
1695  }
1696
1697  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1698    ARMOperand *Op = new ARMOperand(k_CCOut);
1699    Op->Reg.RegNum = RegNum;
1700    Op->StartLoc = S;
1701    Op->EndLoc = S;
1702    return Op;
1703  }
1704
1705  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1706    ARMOperand *Op = new ARMOperand(k_Token);
1707    Op->Tok.Data = Str.data();
1708    Op->Tok.Length = Str.size();
1709    Op->StartLoc = S;
1710    Op->EndLoc = S;
1711    return Op;
1712  }
1713
1714  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1715    ARMOperand *Op = new ARMOperand(k_Register);
1716    Op->Reg.RegNum = RegNum;
1717    Op->StartLoc = S;
1718    Op->EndLoc = E;
1719    return Op;
1720  }
1721
1722  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1723                                           unsigned SrcReg,
1724                                           unsigned ShiftReg,
1725                                           unsigned ShiftImm,
1726                                           SMLoc S, SMLoc E) {
1727    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1728    Op->RegShiftedReg.ShiftTy = ShTy;
1729    Op->RegShiftedReg.SrcReg = SrcReg;
1730    Op->RegShiftedReg.ShiftReg = ShiftReg;
1731    Op->RegShiftedReg.ShiftImm = ShiftImm;
1732    Op->StartLoc = S;
1733    Op->EndLoc = E;
1734    return Op;
1735  }
1736
1737  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1738                                            unsigned SrcReg,
1739                                            unsigned ShiftImm,
1740                                            SMLoc S, SMLoc E) {
1741    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1742    Op->RegShiftedImm.ShiftTy = ShTy;
1743    Op->RegShiftedImm.SrcReg = SrcReg;
1744    Op->RegShiftedImm.ShiftImm = ShiftImm;
1745    Op->StartLoc = S;
1746    Op->EndLoc = E;
1747    return Op;
1748  }
1749
1750  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1751                                   SMLoc S, SMLoc E) {
1752    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1753    Op->ShifterImm.isASR = isASR;
1754    Op->ShifterImm.Imm = Imm;
1755    Op->StartLoc = S;
1756    Op->EndLoc = E;
1757    return Op;
1758  }
1759
1760  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1761    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1762    Op->RotImm.Imm = Imm;
1763    Op->StartLoc = S;
1764    Op->EndLoc = E;
1765    return Op;
1766  }
1767
1768  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1769                                    SMLoc S, SMLoc E) {
1770    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1771    Op->Bitfield.LSB = LSB;
1772    Op->Bitfield.Width = Width;
1773    Op->StartLoc = S;
1774    Op->EndLoc = E;
1775    return Op;
1776  }
1777
1778  static ARMOperand *
1779  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1780                SMLoc StartLoc, SMLoc EndLoc) {
1781    KindTy Kind = k_RegisterList;
1782
1783    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1784      Kind = k_DPRRegisterList;
1785    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
1786             contains(Regs.front().first))
1787      Kind = k_SPRRegisterList;
1788
1789    ARMOperand *Op = new ARMOperand(Kind);
1790    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
1791           I = Regs.begin(), E = Regs.end(); I != E; ++I)
1792      Op->Registers.push_back(I->first);
1793    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
1794    Op->StartLoc = StartLoc;
1795    Op->EndLoc = EndLoc;
1796    return Op;
1797  }
1798
1799  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
1800                                      SMLoc S, SMLoc E) {
1801    ARMOperand *Op = new ARMOperand(k_VectorList);
1802    Op->VectorList.RegNum = RegNum;
1803    Op->VectorList.Count = Count;
1804    Op->StartLoc = S;
1805    Op->EndLoc = E;
1806    return Op;
1807  }
1808
1809  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
1810                                              SMLoc S, SMLoc E) {
1811    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
1812    Op->VectorList.RegNum = RegNum;
1813    Op->VectorList.Count = Count;
1814    Op->StartLoc = S;
1815    Op->EndLoc = E;
1816    return Op;
1817  }
1818
1819  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
1820                                             unsigned Index, SMLoc S, SMLoc E) {
1821    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
1822    Op->VectorList.RegNum = RegNum;
1823    Op->VectorList.Count = Count;
1824    Op->VectorList.LaneIndex = Index;
1825    Op->StartLoc = S;
1826    Op->EndLoc = E;
1827    return Op;
1828  }
1829
1830  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1831                                       MCContext &Ctx) {
1832    ARMOperand *Op = new ARMOperand(k_VectorIndex);
1833    Op->VectorIndex.Val = Idx;
1834    Op->StartLoc = S;
1835    Op->EndLoc = E;
1836    return Op;
1837  }
1838
1839  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
1840    ARMOperand *Op = new ARMOperand(k_Immediate);
1841    Op->Imm.Val = Val;
1842    Op->StartLoc = S;
1843    Op->EndLoc = E;
1844    return Op;
1845  }
1846
1847  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1848    ARMOperand *Op = new ARMOperand(k_FPImmediate);
1849    Op->FPImm.Val = Val;
1850    Op->StartLoc = S;
1851    Op->EndLoc = S;
1852    return Op;
1853  }
1854
1855  static ARMOperand *CreateMem(unsigned BaseRegNum,
1856                               const MCConstantExpr *OffsetImm,
1857                               unsigned OffsetRegNum,
1858                               ARM_AM::ShiftOpc ShiftType,
1859                               unsigned ShiftImm,
1860                               unsigned Alignment,
1861                               bool isNegative,
1862                               SMLoc S, SMLoc E) {
1863    ARMOperand *Op = new ARMOperand(k_Memory);
1864    Op->Memory.BaseRegNum = BaseRegNum;
1865    Op->Memory.OffsetImm = OffsetImm;
1866    Op->Memory.OffsetRegNum = OffsetRegNum;
1867    Op->Memory.ShiftType = ShiftType;
1868    Op->Memory.ShiftImm = ShiftImm;
1869    Op->Memory.Alignment = Alignment;
1870    Op->Memory.isNegative = isNegative;
1871    Op->StartLoc = S;
1872    Op->EndLoc = E;
1873    return Op;
1874  }
1875
1876  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
1877                                      ARM_AM::ShiftOpc ShiftTy,
1878                                      unsigned ShiftImm,
1879                                      SMLoc S, SMLoc E) {
1880    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
1881    Op->PostIdxReg.RegNum = RegNum;
1882    Op->PostIdxReg.isAdd = isAdd;
1883    Op->PostIdxReg.ShiftTy = ShiftTy;
1884    Op->PostIdxReg.ShiftImm = ShiftImm;
1885    Op->StartLoc = S;
1886    Op->EndLoc = E;
1887    return Op;
1888  }
1889
1890  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
1891    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
1892    Op->MBOpt.Val = Opt;
1893    Op->StartLoc = S;
1894    Op->EndLoc = S;
1895    return Op;
1896  }
1897
1898  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
1899    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
1900    Op->IFlags.Val = IFlags;
1901    Op->StartLoc = S;
1902    Op->EndLoc = S;
1903    return Op;
1904  }
1905
1906  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
1907    ARMOperand *Op = new ARMOperand(k_MSRMask);
1908    Op->MMask.Val = MMask;
1909    Op->StartLoc = S;
1910    Op->EndLoc = S;
1911    return Op;
1912  }
1913};
1914
1915} // end anonymous namespace.
1916
1917void ARMOperand::print(raw_ostream &OS) const {
1918  switch (Kind) {
1919  case k_FPImmediate:
1920    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
1921       << ") >";
1922    break;
1923  case k_CondCode:
1924    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
1925    break;
1926  case k_CCOut:
1927    OS << "<ccout " << getReg() << ">";
1928    break;
1929  case k_ITCondMask: {
1930    static const char *MaskStr[] = {
1931      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
1932      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
1933    };
1934    assert((ITMask.Mask & 0xf) == ITMask.Mask);
1935    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
1936    break;
1937  }
1938  case k_CoprocNum:
1939    OS << "<coprocessor number: " << getCoproc() << ">";
1940    break;
1941  case k_CoprocReg:
1942    OS << "<coprocessor register: " << getCoproc() << ">";
1943    break;
1944  case k_CoprocOption:
1945    OS << "<coprocessor option: " << CoprocOption.Val << ">";
1946    break;
1947  case k_MSRMask:
1948    OS << "<mask: " << getMSRMask() << ">";
1949    break;
1950  case k_Immediate:
1951    getImm()->print(OS);
1952    break;
1953  case k_MemBarrierOpt:
1954    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
1955    break;
1956  case k_Memory:
1957    OS << "<memory "
1958       << " base:" << Memory.BaseRegNum;
1959    OS << ">";
1960    break;
1961  case k_PostIndexRegister:
1962    OS << "<post-idx register " << (PostIdxReg.isAdd ? "" : "-")
1963       << PostIdxReg.RegNum;
1964    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
1965      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
1966         << PostIdxReg.ShiftImm;
1967    OS << ">";
1968    break;
1969  case k_ProcIFlags: {
1970    OS << "<ARM_PROC::";
1971    unsigned IFlags = getProcIFlags();
1972    for (int i=2; i >= 0; --i)
1973      if (IFlags & (1 << i))
1974        OS << ARM_PROC::IFlagsToString(1 << i);
1975    OS << ">";
1976    break;
1977  }
1978  case k_Register:
1979    OS << "<register " << getReg() << ">";
1980    break;
1981  case k_ShifterImmediate:
1982    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
1983       << " #" << ShifterImm.Imm << ">";
1984    break;
1985  case k_ShiftedRegister:
1986    OS << "<so_reg_reg "
1987       << RegShiftedReg.SrcReg << " "
1988       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
1989       << " " << RegShiftedReg.ShiftReg << ">";
1990    break;
1991  case k_ShiftedImmediate:
1992    OS << "<so_reg_imm "
1993       << RegShiftedImm.SrcReg << " "
1994       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
1995       << " #" << RegShiftedImm.ShiftImm << ">";
1996    break;
1997  case k_RotateImmediate:
1998    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
1999    break;
2000  case k_BitfieldDescriptor:
2001    OS << "<bitfield " << "lsb: " << Bitfield.LSB
2002       << ", width: " << Bitfield.Width << ">";
2003    break;
2004  case k_RegisterList:
2005  case k_DPRRegisterList:
2006  case k_SPRRegisterList: {
2007    OS << "<register_list ";
2008
2009    const SmallVectorImpl<unsigned> &RegList = getRegList();
2010    for (SmallVectorImpl<unsigned>::const_iterator
2011           I = RegList.begin(), E = RegList.end(); I != E; ) {
2012      OS << *I;
2013      if (++I < E) OS << ", ";
2014    }
2015
2016    OS << ">";
2017    break;
2018  }
2019  case k_VectorList:
2020    OS << "<vector_list " << VectorList.Count << " * "
2021       << VectorList.RegNum << ">";
2022    break;
2023  case k_VectorListAllLanes:
2024    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2025       << VectorList.RegNum << ">";
2026    break;
2027  case k_VectorListIndexed:
2028    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2029       << VectorList.Count << " * " << VectorList.RegNum << ">";
2030    break;
2031  case k_Token:
2032    OS << "'" << getToken() << "'";
2033    break;
2034  case k_VectorIndex:
2035    OS << "<vectorindex " << getVectorIndex() << ">";
2036    break;
2037  }
2038}
2039
2040/// @name Auto-generated Match Functions
2041/// {
2042
2043static unsigned MatchRegisterName(StringRef Name);
2044
2045/// }
2046
2047bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2048                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2049  RegNo = tryParseRegister();
2050
2051  return (RegNo == (unsigned)-1);
2052}
2053
2054/// Try to parse a register name.  The token must be an Identifier when called,
2055/// and if it is a register name the token is eaten and the register number is
2056/// returned.  Otherwise return -1.
2057///
2058int ARMAsmParser::tryParseRegister() {
2059  const AsmToken &Tok = Parser.getTok();
2060  if (Tok.isNot(AsmToken::Identifier)) return -1;
2061
2062  // FIXME: Validate register for the current architecture; we have to do
2063  // validation later, so maybe there is no need for this here.
2064  std::string lowerCase = Tok.getString().lower();
2065  unsigned RegNum = MatchRegisterName(lowerCase);
2066  if (!RegNum) {
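    // Accept the aliases the tablegen'erated matcher doesn't know about:
    // numeric names for sp/lr/pc and "ip" for r12.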
2067    RegNum = StringSwitch<unsigned>(lowerCase)
2068      .Case("r13", ARM::SP)
2069      .Case("r14", ARM::LR)
2070      .Case("r15", ARM::PC)
2071      .Case("ip", ARM::R12)
2072      .Default(0);
2073  }
2074  if (!RegNum) return -1;
2075
2076  Parser.Lex(); // Eat identifier token.
2077
2078  return RegNum;
2079}
2080
2081// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
2082// If a recoverable error occurs, return 1. If an irrecoverable error
2083// occurs, return -1. An irrecoverable error is one where tokens have been
2084// consumed in the process of trying to parse the shifter (i.e., when it is
2085// indeed a shifter operand, but malformed).
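// Accepted forms include "lsl #3", "asr r4", and "rrx" (which takes no shift
// amount).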
2086int ARMAsmParser::tryParseShiftRegister(
2087                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2088  SMLoc S = Parser.getTok().getLoc();
2089  const AsmToken &Tok = Parser.getTok();
2090  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2091
2092  std::string lowerCase = Tok.getString().lower();
2093  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2094      .Case("lsl", ARM_AM::lsl)
2095      .Case("lsr", ARM_AM::lsr)
2096      .Case("asr", ARM_AM::asr)
2097      .Case("ror", ARM_AM::ror)
2098      .Case("rrx", ARM_AM::rrx)
2099      .Default(ARM_AM::no_shift);
2100
2101  if (ShiftTy == ARM_AM::no_shift)
2102    return 1;
2103
2104  Parser.Lex(); // Eat the operator.
2105
2106  // The source register for the shift has already been added to the
2107  // operand list, so we need to pop it off and combine it into the shifted
2108  // register operand instead.
2109  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2110  if (!PrevOp->isReg())
2111    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2112  int SrcReg = PrevOp->getReg();
2113  int64_t Imm = 0;
2114  int ShiftReg = 0;
2115  if (ShiftTy == ARM_AM::rrx) {
2116    // RRX doesn't have an explicit shift amount. The encoder expects
2117    // the shift register to be the same as the source register. Seems odd,
2118    // but OK.
2119    ShiftReg = SrcReg;
2120  } else {
2121    // Figure out if this is shifted by a constant or a register (for non-RRX).
2122    if (Parser.getTok().is(AsmToken::Hash)) {
2123      Parser.Lex(); // Eat hash.
2124      SMLoc ImmLoc = Parser.getTok().getLoc();
2125      const MCExpr *ShiftExpr = 0;
2126      if (getParser().ParseExpression(ShiftExpr)) {
2127        Error(ImmLoc, "invalid immediate shift value");
2128        return -1;
2129      }
2130      // The expression must be evaluatable as an immediate.
2131      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2132      if (!CE) {
2133        Error(ImmLoc, "invalid immediate shift value");
2134        return -1;
2135      }
2136      // Range check the immediate.
2137      // lsl, ror: 0 <= imm <= 31
2138      // lsr, asr: 0 <= imm <= 32
2139      Imm = CE->getValue();
2140      if (Imm < 0 ||
2141          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2142          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2143        Error(ImmLoc, "immediate shift value out of range");
2144        return -1;
2145      }
2146    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2147      ShiftReg = tryParseRegister();
2148      SMLoc L = Parser.getTok().getLoc();
2149      if (ShiftReg == -1) {
2150        Error(L, "expected immediate or register in shift operand");
2151        return -1;
2152      }
2153    } else {
2154      Error(Parser.getTok().getLoc(),
2155            "expected immediate or register in shift operand");
2156      return -1;
2157    }
2158  }
2159
2160  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2161    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2162                                                         ShiftReg, Imm,
2163                                               S, Parser.getTok().getLoc()));
2164  else
2165    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2166                                               S, Parser.getTok().getLoc()));
2167
2168  return 0;
2169}
2170
2171
2172/// Try to parse a register name.  The token must be an Identifier when called.
2173/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2174/// if there is a "writeback". Returns 'true' if it's not a register.
2175///
2176/// TODO this is likely to change to allow different register types and/or to
2177/// parse for a specific register type.
2178bool ARMAsmParser::
2179tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2180  SMLoc S = Parser.getTok().getLoc();
2181  int RegNo = tryParseRegister();
2182  if (RegNo == -1)
2183    return true;
2184
2185  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2186
2187  const AsmToken &ExclaimTok = Parser.getTok();
2188  if (ExclaimTok.is(AsmToken::Exclaim)) {
2189    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2190                                               ExclaimTok.getLoc()));
2191    Parser.Lex(); // Eat exclaim token
2192    return false;
2193  }
2194
2195  // Also check for an index operand. This is only legal for vector registers,
2196  // but that'll get caught OK in operand matching, so we don't need to
2197  // explicitly filter everything else out here.
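  // e.g. "d3[1]" yields a register operand followed by a vector index operand.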
2198  if (Parser.getTok().is(AsmToken::LBrac)) {
2199    SMLoc SIdx = Parser.getTok().getLoc();
2200    Parser.Lex(); // Eat left bracket token.
2201
2202    const MCExpr *ImmVal;
2203    if (getParser().ParseExpression(ImmVal))
2204      return true;
2205    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2206    if (!MCE) {
2207      TokError("immediate value expected for vector index");
2208      return true;
2209    }
2210
2211    SMLoc E = Parser.getTok().getLoc();
2212    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2213      Error(E, "']' expected");
2214      return true;
2215    }
2216
2217    Parser.Lex(); // Eat right bracket token.
2218
2219    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2220                                                     SIdx, E,
2221                                                     getContext()));
2222  }
2223
2224  return false;
2225}
2226
2227/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2228/// instruction with a symbolic operand name. Example: "p1", "p7", "c3",
2229/// "c5", ...
2230static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2231  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2232  // but efficient.
2233  switch (Name.size()) {
2234  default: break;
2235  case 2:
2236    if (Name[0] != CoprocOp)
2237      return -1;
2238    switch (Name[1]) {
2239    default:  return -1;
2240    case '0': return 0;
2241    case '1': return 1;
2242    case '2': return 2;
2243    case '3': return 3;
2244    case '4': return 4;
2245    case '5': return 5;
2246    case '6': return 6;
2247    case '7': return 7;
2248    case '8': return 8;
2249    case '9': return 9;
2250    }
2251    break;
2252  case 3:
2253    if (Name[0] != CoprocOp || Name[1] != '1')
2254      return -1;
2255    switch (Name[2]) {
2256    default:  return -1;
2257    case '0': return 10;
2258    case '1': return 11;
2259    case '2': return 12;
2260    case '3': return 13;
2261    case '4': return 14;
2262    case '5': return 15;
2263    }
2264    break;
2265  }
2266
2267  return -1;
2268}
2269
2270/// parseITCondCode - Try to parse a condition code for an IT instruction.
2271ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2272parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2273  SMLoc S = Parser.getTok().getLoc();
2274  const AsmToken &Tok = Parser.getTok();
2275  if (!Tok.is(AsmToken::Identifier))
2276    return MatchOperand_NoMatch;
2277  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2278    .Case("eq", ARMCC::EQ)
2279    .Case("ne", ARMCC::NE)
2280    .Case("hs", ARMCC::HS)
2281    .Case("cs", ARMCC::HS)
2282    .Case("lo", ARMCC::LO)
2283    .Case("cc", ARMCC::LO)
2284    .Case("mi", ARMCC::MI)
2285    .Case("pl", ARMCC::PL)
2286    .Case("vs", ARMCC::VS)
2287    .Case("vc", ARMCC::VC)
2288    .Case("hi", ARMCC::HI)
2289    .Case("ls", ARMCC::LS)
2290    .Case("ge", ARMCC::GE)
2291    .Case("lt", ARMCC::LT)
2292    .Case("gt", ARMCC::GT)
2293    .Case("le", ARMCC::LE)
2294    .Case("al", ARMCC::AL)
2295    .Default(~0U);
2296  if (CC == ~0U)
2297    return MatchOperand_NoMatch;
2298  Parser.Lex(); // Eat the token.
2299
2300  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2301
2302  return MatchOperand_Success;
2303}
2304
2305/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2306/// token must be an Identifier when called, and if it is a coprocessor
2307/// number, the token is eaten and the operand is added to the operand list.
2308ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2309parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2310  SMLoc S = Parser.getTok().getLoc();
2311  const AsmToken &Tok = Parser.getTok();
2312  if (Tok.isNot(AsmToken::Identifier))
2313    return MatchOperand_NoMatch;
2314
2315  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2316  if (Num == -1)
2317    return MatchOperand_NoMatch;
2318
2319  Parser.Lex(); // Eat identifier token.
2320  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2321  return MatchOperand_Success;
2322}
2323
2324/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2325/// token must be an Identifier when called, and if it is a coprocessor
2326/// register, the token is eaten and the operand is added to the operand list.
2327ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2328parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2329  SMLoc S = Parser.getTok().getLoc();
2330  const AsmToken &Tok = Parser.getTok();
2331  if (Tok.isNot(AsmToken::Identifier))
2332    return MatchOperand_NoMatch;
2333
2334  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2335  if (Reg == -1)
2336    return MatchOperand_NoMatch;
2337
2338  Parser.Lex(); // Eat identifier token.
2339  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2340  return MatchOperand_Success;
2341}
2342
2343/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2344/// coproc_option : '{' imm0_255 '}'
2345ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2346parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2347  SMLoc S = Parser.getTok().getLoc();
2348
2349  // If this isn't a '{', this isn't a coprocessor immediate operand.
2350  if (Parser.getTok().isNot(AsmToken::LCurly))
2351    return MatchOperand_NoMatch;
2352  Parser.Lex(); // Eat the '{'
2353
2354  const MCExpr *Expr;
2355  SMLoc Loc = Parser.getTok().getLoc();
2356  if (getParser().ParseExpression(Expr)) {
2357    Error(Loc, "illegal expression");
2358    return MatchOperand_ParseFail;
2359  }
2360  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2361  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2362    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2363    return MatchOperand_ParseFail;
2364  }
2365  int Val = CE->getValue();
2366
2367  // Check for and consume the closing '}'
2368  if (Parser.getTok().isNot(AsmToken::RCurly))
2369    return MatchOperand_ParseFail;
2370  SMLoc E = Parser.getTok().getLoc();
2371  Parser.Lex(); // Eat the '}'
2372
2373  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2374  return MatchOperand_Success;
2375}
2376
2377// For register list parsing, we need to map from raw GPR register numbering
2378// to the enumeration values. The enumeration values aren't sorted by
2379// register number due to our using "sp", "lr" and "pc" as canonical names.
2380static unsigned getNextRegister(unsigned Reg) {
2381  // If this is a GPR, we need to do it manually, otherwise we can rely
2382  // on the sort ordering of the enumeration since the other reg-classes
2383  // are sane.
2384  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2385    return Reg + 1;
2386  switch(Reg) {
2387  default: assert(0 && "Invalid GPR number!");
2388  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2389  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2390  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2391  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2392  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2393  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2394  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2395  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2396  }
2397}
2398
2399// Return the low-subreg of a given Q register.
2400static unsigned getDRegFromQReg(unsigned QReg) {
2401  switch (QReg) {
2402  default: llvm_unreachable("expected a Q register!");
2403  case ARM::Q0:  return ARM::D0;
2404  case ARM::Q1:  return ARM::D2;
2405  case ARM::Q2:  return ARM::D4;
2406  case ARM::Q3:  return ARM::D6;
2407  case ARM::Q4:  return ARM::D8;
2408  case ARM::Q5:  return ARM::D10;
2409  case ARM::Q6:  return ARM::D12;
2410  case ARM::Q7:  return ARM::D14;
2411  case ARM::Q8:  return ARM::D16;
2412  case ARM::Q9:  return ARM::D18;
2413  case ARM::Q10: return ARM::D20;
2414  case ARM::Q11: return ARM::D22;
2415  case ARM::Q12: return ARM::D24;
2416  case ARM::Q13: return ARM::D26;
2417  case ARM::Q14: return ARM::D28;
2418  case ARM::Q15: return ARM::D30;
2419  }
2420}
2421
2422/// Parse a register list.
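/// e.g. {r0, r2-r4, lr} or {d0-d3}; a Q register such as q1 is also accepted
/// and expanded into its two D sub-registers.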
2423bool ARMAsmParser::
2424parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2425  assert(Parser.getTok().is(AsmToken::LCurly) &&
2426         "Token is not a Left Curly Brace");
2427  SMLoc S = Parser.getTok().getLoc();
2428  Parser.Lex(); // Eat '{' token.
2429  SMLoc RegLoc = Parser.getTok().getLoc();
2430
2431  // Check the first register in the list to see what register class
2432  // this is a list of.
2433  int Reg = tryParseRegister();
2434  if (Reg == -1)
2435    return Error(RegLoc, "register expected");
2436
2437  // The reglist instructions have at most 16 registers, so reserve
2438  // space for that many.
2439  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2440
2441  // Allow Q regs and just interpret them as the two D sub-registers.
2442  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2443    Reg = getDRegFromQReg(Reg);
2444    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2445    ++Reg;
2446  }
2447  const MCRegisterClass *RC;
2448  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2449    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2450  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2451    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2452  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2453    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2454  else
2455    return Error(RegLoc, "invalid register in register list");
2456
2457  // Store the register.
2458  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2459
2460  // This starts immediately after the first register token in the list,
2461  // so we can see either a comma or a minus (range separator) as a legal
2462  // next token.
2463  while (Parser.getTok().is(AsmToken::Comma) ||
2464         Parser.getTok().is(AsmToken::Minus)) {
2465    if (Parser.getTok().is(AsmToken::Minus)) {
2466      Parser.Lex(); // Eat the minus.
2467      SMLoc EndLoc = Parser.getTok().getLoc();
2468      int EndReg = tryParseRegister();
2469      if (EndReg == -1)
2470        return Error(EndLoc, "register expected");
2471      // Allow Q regs and just interpret them as the two D sub-registers.
2472      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2473        EndReg = getDRegFromQReg(EndReg) + 1;
2474      // If the register is the same as the start reg, there's nothing
2475      // more to do.
2476      if (Reg == EndReg)
2477        continue;
2478      // The register must be in the same register class as the first.
2479      if (!RC->contains(EndReg))
2480        return Error(EndLoc, "invalid register in register list");
2481      // Ranges must go from low to high.
2482      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2483        return Error(EndLoc, "bad range in register list");
2484
2485      // Add all the registers in the range to the register list.
2486      while (Reg != EndReg) {
2487        Reg = getNextRegister(Reg);
2488        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2489      }
2490      continue;
2491    }
2492    Parser.Lex(); // Eat the comma.
2493    RegLoc = Parser.getTok().getLoc();
2494    int OldReg = Reg;
2495    Reg = tryParseRegister();
2496    if (Reg == -1)
2497      return Error(RegLoc, "register expected");
2498    // Allow Q regs and just interpret them as the two D sub-registers.
2499    bool isQReg = false;
2500    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2501      Reg = getDRegFromQReg(Reg);
2502      isQReg = true;
2503    }
2504    // The register must be in the same register class as the first.
2505    if (!RC->contains(Reg))
2506      return Error(RegLoc, "invalid register in register list");
2507    // List must be monotonically increasing.
2508    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
2509      return Error(RegLoc, "register list not in ascending order");
2510    // VFP register lists must also be contiguous.
2511    // It's OK to use the enumeration values directly here, as the
2512    // VFP register classes have the enum sorted properly.
2513    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2514        Reg != OldReg + 1)
2515      return Error(RegLoc, "non-contiguous register range");
2516    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2517    if (isQReg)
2518      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2519  }
2520
2521  SMLoc E = Parser.getTok().getLoc();
2522  if (Parser.getTok().isNot(AsmToken::RCurly))
2523    return Error(E, "'}' expected");
2524  Parser.Lex(); // Eat '}' token.
2525
2526  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2527  return false;
2528}
2529
2530// Helper function to parse the lane index for vector lists.
2531ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2532parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2533  Index = 0; // Always return a defined index value.
2534  if (Parser.getTok().is(AsmToken::LBrac)) {
2535    Parser.Lex(); // Eat the '['.
2536    if (Parser.getTok().is(AsmToken::RBrac)) {
2537      // "Dn[]" is the 'all lanes' syntax.
2538      LaneKind = AllLanes;
2539      Parser.Lex(); // Eat the ']'.
2540      return MatchOperand_Success;
2541    }
2542    if (Parser.getTok().is(AsmToken::Integer)) {
2543      int64_t Val = Parser.getTok().getIntVal();
2544      // Make this range check context sensitive for .8, .16, .32.
2545      if (Val < 0 || Val > 7)
2546        Error(Parser.getTok().getLoc(), "lane index out of range");
2547      Index = Val;
2548      LaneKind = IndexedLane;
2549      Parser.Lex(); // Eat the token.
2550      if (Parser.getTok().isNot(AsmToken::RBrac))
2551        Error(Parser.getTok().getLoc(), "']' expected");
2552      Parser.Lex(); // Eat the ']'.
2553      return MatchOperand_Success;
2554    }
2555    Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2556    return MatchOperand_ParseFail;
2557  }
2558  LaneKind = NoLanes;
2559  return MatchOperand_Success;
2560}
2561
2562/// parseVectorList - Parse a NEON vector register list.
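/// Accepts forms such as {d0, d1, d2}, {d0-d3}, all-lanes lists like
/// {d0[], d1[]}, and indexed lists like {d0[1], d1[1]}; a bare D or Q register
/// is also accepted as an extension (see below).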
2563ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2564parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2565  VectorLaneTy LaneKind;
2566  unsigned LaneIndex;
2567  SMLoc S = Parser.getTok().getLoc();
2568  // As an extension (to match gas), support a plain D register or Q register
2569  // (without enclosing curly braces) as a single or double entry list,
2570  // respectively.
2571  if (Parser.getTok().is(AsmToken::Identifier)) {
2572    int Reg = tryParseRegister();
2573    if (Reg == -1)
2574      return MatchOperand_NoMatch;
2575    SMLoc E = Parser.getTok().getLoc();
2576    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2577      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2578      if (Res != MatchOperand_Success)
2579        return Res;
2580      switch (LaneKind) {
2581      default:
2582        assert(0 && "unexpected lane kind!");
2583      case NoLanes:
2584        E = Parser.getTok().getLoc();
2585        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E));
2586        break;
2587      case AllLanes:
2588        E = Parser.getTok().getLoc();
2589        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
2590        break;
2591      case IndexedLane:
2592        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2593                                                               LaneIndex, S,E));
2594        break;
2595      }
2596      return MatchOperand_Success;
2597    }
2598    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2599      Reg = getDRegFromQReg(Reg);
2600      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2601      if (Res != MatchOperand_Success)
2602        return Res;
2603      switch (LaneKind) {
2604      default:
2605        assert(0 && "unexpected lane kind!");
2606      case NoLanes:
2607        E = Parser.getTok().getLoc();
2608        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, E));
2609        break;
2610      case AllLanes:
2611        E = Parser.getTok().getLoc();
2612        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
2613        break;
2614      case IndexedLane:
2615        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2616                                                               LaneIndex, S,E));
2617        break;
2618      }
2619      return MatchOperand_Success;
2620    }
2621    Error(S, "vector register expected");
2622    return MatchOperand_ParseFail;
2623  }
2624
2625  if (Parser.getTok().isNot(AsmToken::LCurly))
2626    return MatchOperand_NoMatch;
2627
2628  Parser.Lex(); // Eat '{' token.
2629  SMLoc RegLoc = Parser.getTok().getLoc();
2630
2631  int Reg = tryParseRegister();
2632  if (Reg == -1) {
2633    Error(RegLoc, "register expected");
2634    return MatchOperand_ParseFail;
2635  }
2636  unsigned Count = 1;
2637  unsigned FirstReg = Reg;
2638  // The list is of D registers, but we also allow Q regs and just interpret
2639  // them as the two D sub-registers.
2640  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2641    FirstReg = Reg = getDRegFromQReg(Reg);
2642    ++Reg;
2643    ++Count;
2644  }
2645  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2646    return MatchOperand_ParseFail;
2647
2648  while (Parser.getTok().is(AsmToken::Comma) ||
2649         Parser.getTok().is(AsmToken::Minus)) {
2650    if (Parser.getTok().is(AsmToken::Minus)) {
2651      Parser.Lex(); // Eat the minus.
2652      SMLoc EndLoc = Parser.getTok().getLoc();
2653      int EndReg = tryParseRegister();
2654      if (EndReg == -1) {
2655        Error(EndLoc, "register expected");
2656        return MatchOperand_ParseFail;
2657      }
2658      // Allow Q regs and just interpret them as the two D sub-registers.
2659      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2660        EndReg = getDRegFromQReg(EndReg) + 1;
2661      // If the register is the same as the start reg, there's nothing
2662      // more to do.
2663      if (Reg == EndReg)
2664        continue;
2665      // The register must be in the same register class as the first.
2666      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2667        Error(EndLoc, "invalid register in register list");
2668        return MatchOperand_ParseFail;
2669      }
2670      // Ranges must go from low to high.
2671      if (Reg > EndReg) {
2672        Error(EndLoc, "bad range in register list");
2673        return MatchOperand_ParseFail;
2674      }
2675      // Parse the lane specifier if present.
2676      VectorLaneTy NextLaneKind;
2677      unsigned NextLaneIndex;
2678      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2679        return MatchOperand_ParseFail;
2680      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2681        Error(EndLoc, "mismatched lane index in register list");
2682        return MatchOperand_ParseFail;
2683      }
2684      EndLoc = Parser.getTok().getLoc();
2685
2686      // Add all the registers in the range to the register list.
2687      Count += EndReg - Reg;
2688      Reg = EndReg;
2689      continue;
2690    }
2691    Parser.Lex(); // Eat the comma.
2692    RegLoc = Parser.getTok().getLoc();
2693    int OldReg = Reg;
2694    Reg = tryParseRegister();
2695    if (Reg == -1) {
2696      Error(RegLoc, "register expected");
2697      return MatchOperand_ParseFail;
2698    }
2699    // vector register lists must be contiguous.
2700    // It's OK to use the enumeration values directly here, as the
2701    // VFP register classes have the enum sorted properly.
2702    //
2703    // The list is of D registers, but we also allow Q regs and just interpret
2704    // them as the two D sub-registers.
2705    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2706      Reg = getDRegFromQReg(Reg);
2707      if (Reg != OldReg + 1) {
2708        Error(RegLoc, "non-contiguous register range");
2709        return MatchOperand_ParseFail;
2710      }
2711      ++Reg;
2712      Count += 2;
2713      // Parse the lane specifier if present.
2714      VectorLaneTy NextLaneKind;
2715      unsigned NextLaneIndex;
2716      SMLoc EndLoc = Parser.getTok().getLoc();
2717      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2718        return MatchOperand_ParseFail;
2719      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2720        Error(EndLoc, "mismatched lane index in register list");
2721        return MatchOperand_ParseFail;
2722      }
2723      continue;
2724    }
2725    // Normal D register. Just check that it's contiguous and keep going.
2726    if (Reg != OldReg + 1) {
2727      Error(RegLoc, "non-contiguous register range");
2728      return MatchOperand_ParseFail;
2729    }
2730    ++Count;
2731    // Parse the lane specifier if present.
2732    VectorLaneTy NextLaneKind;
2733    unsigned NextLaneIndex;
2734    SMLoc EndLoc = Parser.getTok().getLoc();
2735    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2736      return MatchOperand_ParseFail;
2737    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2738      Error(EndLoc, "mismatched lane index in register list");
2739      return MatchOperand_ParseFail;
2740    }
2741  }
2742
2743  SMLoc E = Parser.getTok().getLoc();
2744  if (Parser.getTok().isNot(AsmToken::RCurly)) {
2745    Error(E, "'}' expected");
2746    return MatchOperand_ParseFail;
2747  }
2748  Parser.Lex(); // Eat '}' token.
2749
2750  switch (LaneKind) {
2751  default:
2752    assert(0 && "unexpected lane kind in register list.");
2753  case NoLanes:
2754    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
2755    break;
2756  case AllLanes:
2757    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
2758                                                            S, E));
2759    break;
2760  case IndexedLane:
2761    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
2762                                                           LaneIndex, S, E));
2763    break;
2764  }
2765  return MatchOperand_Success;
2766}
2767
2768/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
2769ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2770parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2771  SMLoc S = Parser.getTok().getLoc();
2772  const AsmToken &Tok = Parser.getTok();
2773  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2774  StringRef OptStr = Tok.getString();
2775
2776  unsigned Opt = StringSwitch<unsigned>(OptStr)
2777    .Case("sy",    ARM_MB::SY)
2778    .Case("st",    ARM_MB::ST)
2779    .Case("sh",    ARM_MB::ISH)
2780    .Case("ish",   ARM_MB::ISH)
2781    .Case("shst",  ARM_MB::ISHST)
2782    .Case("ishst", ARM_MB::ISHST)
2783    .Case("nsh",   ARM_MB::NSH)
2784    .Case("un",    ARM_MB::NSH)
2785    .Case("nshst", ARM_MB::NSHST)
2786    .Case("unst",  ARM_MB::NSHST)
2787    .Case("osh",   ARM_MB::OSH)
2788    .Case("oshst", ARM_MB::OSHST)
2789    .Default(~0U);
2790
2791  if (Opt == ~0U)
2792    return MatchOperand_NoMatch;
2793
2794  Parser.Lex(); // Eat identifier token.
2795  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
2796  return MatchOperand_Success;
2797}
2798
2799/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
2800ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2801parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2802  SMLoc S = Parser.getTok().getLoc();
2803  const AsmToken &Tok = Parser.getTok();
2804  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2805  StringRef IFlagsStr = Tok.getString();
2806
2807  // An iflags string of "none" is interpreted to mean that none of the AIF
2808  // bits are set.  Not a terribly useful instruction, but a valid encoding.
2809  unsigned IFlags = 0;
2810  if (IFlagsStr != "none") {
2811    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
2812      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
2813        .Case("a", ARM_PROC::A)
2814        .Case("i", ARM_PROC::I)
2815        .Case("f", ARM_PROC::F)
2816        .Default(~0U);
2817
2818      // If some specific iflag is already set, it means that some letter is
2819      // present more than once, this is not acceptable.
2820      if (Flag == ~0U || (IFlags & Flag))
2821        return MatchOperand_NoMatch;
2822
2823      IFlags |= Flag;
2824    }
2825  }
2826
2827  Parser.Lex(); // Eat identifier token.
2828  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
2829  return MatchOperand_Success;
2830}
2831
2832/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
2833ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2834parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2835  SMLoc S = Parser.getTok().getLoc();
2836  const AsmToken &Tok = Parser.getTok();
2837  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2838  StringRef Mask = Tok.getString();
2839
2840  if (isMClass()) {
2841    // See ARMv6-M 10.1.1
2842    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
2843      .Case("apsr", 0)
2844      .Case("iapsr", 1)
2845      .Case("eapsr", 2)
2846      .Case("xpsr", 3)
2847      .Case("ipsr", 5)
2848      .Case("epsr", 6)
2849      .Case("iepsr", 7)
2850      .Case("msp", 8)
2851      .Case("psp", 9)
2852      .Case("primask", 16)
2853      .Case("basepri", 17)
2854      .Case("basepri_max", 18)
2855      .Case("faultmask", 19)
2856      .Case("control", 20)
2857      .Default(~0U);
2858
2859    if (FlagsVal == ~0U)
2860      return MatchOperand_NoMatch;
2861
2862    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
2863      // basepri, basepri_max and faultmask only valid for V7m.
2864      return MatchOperand_NoMatch;
2865
2866    Parser.Lex(); // Eat identifier token.
2867    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
2868    return MatchOperand_Success;
2869  }
2870
2871  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
2872  size_t Start = 0, Next = Mask.find('_');
2873  StringRef Flags = "";
2874  std::string SpecReg = Mask.slice(Start, Next).lower();
2875  if (Next != StringRef::npos)
2876    Flags = Mask.slice(Next+1, Mask.size());
2877
2878  // FlagsVal contains the complete mask:
2879  // 3-0: Mask
2880  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
2881  unsigned FlagsVal = 0;
2882
2883  if (SpecReg == "apsr") {
2884    FlagsVal = StringSwitch<unsigned>(Flags)
2885    .Case("nzcvq",  0x8) // same as CPSR_f
2886    .Case("g",      0x4) // same as CPSR_s
2887    .Case("nzcvqg", 0xc) // same as CPSR_fs
2888    .Default(~0U);
2889
2890    if (FlagsVal == ~0U) {
2891      if (!Flags.empty())
2892        return MatchOperand_NoMatch;
2893      else
2894        FlagsVal = 8; // No flag
2895    }
2896  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
2897    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
2898      Flags = "fc";
2899    for (int i = 0, e = Flags.size(); i != e; ++i) {
2900      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
2901      .Case("c", 1)
2902      .Case("x", 2)
2903      .Case("s", 4)
2904      .Case("f", 8)
2905      .Default(~0U);
2906
2907      // If some specific flag is already set, it means that some letter is
2908      // present more than once, which is not acceptable.
2909      if (Flag == ~0U || (FlagsVal & Flag))
2910        return MatchOperand_NoMatch;
2911      FlagsVal |= Flag;
2912    }
2913  } else // No match for special register.
2914    return MatchOperand_NoMatch;
2915
2916  // Special register without flags is NOT equivalent to "fc" flags.
2917  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
2918  // two lines would enable gas compatibility at the expense of breaking
2919  // round-tripping.
2920  //
2921  // if (!FlagsVal)
2922  //  FlagsVal = 0x9;
2923
2924  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
2925  if (SpecReg == "spsr")
2926    FlagsVal |= 16;
2927
2928  Parser.Lex(); // Eat identifier token.
2929  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
2930  return MatchOperand_Success;
2931}
2932
2933ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2934parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
2935            int Low, int High) {
2936  const AsmToken &Tok = Parser.getTok();
2937  if (Tok.isNot(AsmToken::Identifier)) {
2938    Error(Parser.getTok().getLoc(), Op + " operand expected.");
2939    return MatchOperand_ParseFail;
2940  }
2941  StringRef ShiftName = Tok.getString();
2942  std::string LowerOp = Op.lower();
2943  std::string UpperOp = Op.upper();
2944  if (ShiftName != LowerOp && ShiftName != UpperOp) {
2945    Error(Parser.getTok().getLoc(), Op + " operand expected.");
2946    return MatchOperand_ParseFail;
2947  }
2948  Parser.Lex(); // Eat shift type token.
2949
2950  // There must be a '#' and a shift amount.
2951  if (Parser.getTok().isNot(AsmToken::Hash)) {
2952    Error(Parser.getTok().getLoc(), "'#' expected");
2953    return MatchOperand_ParseFail;
2954  }
2955  Parser.Lex(); // Eat hash token.
2956
2957  const MCExpr *ShiftAmount;
2958  SMLoc Loc = Parser.getTok().getLoc();
2959  if (getParser().ParseExpression(ShiftAmount)) {
2960    Error(Loc, "illegal expression");
2961    return MatchOperand_ParseFail;
2962  }
2963  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
2964  if (!CE) {
2965    Error(Loc, "constant expression expected");
2966    return MatchOperand_ParseFail;
2967  }
2968  int Val = CE->getValue();
2969  if (Val < Low || Val > High) {
2970    Error(Loc, "immediate value out of range");
2971    return MatchOperand_ParseFail;
2972  }
2973
2974  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
2975
2976  return MatchOperand_Success;
2977}
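
// Editor's note: illustrative uses of the shifter operand parsed above
// (not from the original source; standard UAL syntax assumed):
//   pkhbt r0, r1, r2, lsl #8     @ 'lsl' amount in [0,31]
//   pkhtb r0, r1, r2, asr #16    @ 'asr' amount in [1,32]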
2978
2979ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2980parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2981  const AsmToken &Tok = Parser.getTok();
2982  SMLoc S = Tok.getLoc();
2983  if (Tok.isNot(AsmToken::Identifier)) {
2984    Error(Tok.getLoc(), "'be' or 'le' operand expected");
2985    return MatchOperand_ParseFail;
2986  }
2987  int Val = StringSwitch<int>(Tok.getString())
2988    .Case("be", 1)
2989    .Case("le", 0)
2990    .Default(-1);
2991  Parser.Lex(); // Eat the token.
2992
2993  if (Val == -1) {
2994    Error(Tok.getLoc(), "'be' or 'le' operand expected");
2995    return MatchOperand_ParseFail;
2996  }
2997  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
2998                                                                  getContext()),
2999                                           S, Parser.getTok().getLoc()));
3000  return MatchOperand_Success;
3001}
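
// Editor's note: illustrative inputs for the operand parsed above
// (not from the original source):
//   setend be    @ big-endian data accesses     -> immediate 1
//   setend le    @ little-endian data accesses  -> immediate 0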
3002
3003/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
3004/// instructions. Legal values are:
3005///     lsl #n  'n' in [0,31]
3006///     asr #n  'n' in [1,32]
3007///             n == 32 encoded as n == 0.
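/// Illustrative uses (editorial examples, standard UAL syntax assumed):
///     ssat r0, #8, r1, lsl #4
///     usat r0, #15, r1, asr #32   @ ARM mode only; encoded as asr #0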
3008ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3009parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3010  const AsmToken &Tok = Parser.getTok();
3011  SMLoc S = Tok.getLoc();
3012  if (Tok.isNot(AsmToken::Identifier)) {
3013    Error(S, "shift operator 'asr' or 'lsl' expected");
3014    return MatchOperand_ParseFail;
3015  }
3016  StringRef ShiftName = Tok.getString();
3017  bool isASR;
3018  if (ShiftName == "lsl" || ShiftName == "LSL")
3019    isASR = false;
3020  else if (ShiftName == "asr" || ShiftName == "ASR")
3021    isASR = true;
3022  else {
3023    Error(S, "shift operator 'asr' or 'lsl' expected");
3024    return MatchOperand_ParseFail;
3025  }
3026  Parser.Lex(); // Eat the operator.
3027
3028  // A '#' and a shift amount.
3029  if (Parser.getTok().isNot(AsmToken::Hash)) {
3030    Error(Parser.getTok().getLoc(), "'#' expected");
3031    return MatchOperand_ParseFail;
3032  }
3033  Parser.Lex(); // Eat hash token.
3034
3035  const MCExpr *ShiftAmount;
3036  SMLoc E = Parser.getTok().getLoc();
3037  if (getParser().ParseExpression(ShiftAmount)) {
3038    Error(E, "malformed shift expression");
3039    return MatchOperand_ParseFail;
3040  }
3041  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3042  if (!CE) {
3043    Error(E, "shift amount must be an immediate");
3044    return MatchOperand_ParseFail;
3045  }
3046
3047  int64_t Val = CE->getValue();
3048  if (isASR) {
3049    // Shift amount must be in [1,32]
3050    if (Val < 1 || Val > 32) {
3051      Error(E, "'asr' shift amount must be in range [1,32]");
3052      return MatchOperand_ParseFail;
3053    }
3054    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3055    if (isThumb() && Val == 32) {
3056      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3057      return MatchOperand_ParseFail;
3058    }
3059    if (Val == 32) Val = 0;
3060  } else {
3061    // Shift amount must be in [1,32]
3062    if (Val < 0 || Val > 31) {
3063      Error(E, "'lsr' shift amount must be in range [0,31]");
3064      return MatchOperand_ParseFail;
3065    }
3066  }
3067
3068  E = Parser.getTok().getLoc();
3069  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3070
3071  return MatchOperand_Success;
3072}
3073
3074/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3075/// of instructions. Legal values are:
3076///     ror #n  'n' in {0, 8, 16, 24}
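/// Illustrative uses (editorial examples, standard UAL syntax assumed):
///     sxtb r0, r1, ror #8
///     uxth r2, r3, ror #16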
3077ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3078parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3079  const AsmToken &Tok = Parser.getTok();
3080  SMLoc S = Tok.getLoc();
3081  if (Tok.isNot(AsmToken::Identifier))
3082    return MatchOperand_NoMatch;
3083  StringRef ShiftName = Tok.getString();
3084  if (ShiftName != "ror" && ShiftName != "ROR")
3085    return MatchOperand_NoMatch;
3086  Parser.Lex(); // Eat the operator.
3087
3088  // A '#' and a rotate amount.
3089  if (Parser.getTok().isNot(AsmToken::Hash)) {
3090    Error(Parser.getTok().getLoc(), "'#' expected");
3091    return MatchOperand_ParseFail;
3092  }
3093  Parser.Lex(); // Eat hash token.
3094
3095  const MCExpr *ShiftAmount;
3096  SMLoc E = Parser.getTok().getLoc();
3097  if (getParser().ParseExpression(ShiftAmount)) {
3098    Error(E, "malformed rotate expression");
3099    return MatchOperand_ParseFail;
3100  }
3101  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3102  if (!CE) {
3103    Error(E, "rotate amount must be an immediate");
3104    return MatchOperand_ParseFail;
3105  }
3106
3107  int64_t Val = CE->getValue();
3108  // Shift amount must be in {0, 8, 16, 24} (0 is an undocumented extension);
3109  // normally, zero is represented in asm by omitting the rotate operand
3110  // entirely.
3111  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3112    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3113    return MatchOperand_ParseFail;
3114  }
3115
3116  E = Parser.getTok().getLoc();
3117  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3118
3119  return MatchOperand_Success;
3120}
3121
3122ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3123parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3124  SMLoc S = Parser.getTok().getLoc();
3125  // The bitfield descriptor is really two operands, the LSB and the width.
3126  if (Parser.getTok().isNot(AsmToken::Hash)) {
3127    Error(Parser.getTok().getLoc(), "'#' expected");
3128    return MatchOperand_ParseFail;
3129  }
3130  Parser.Lex(); // Eat hash token.
3131
3132  const MCExpr *LSBExpr;
3133  SMLoc E = Parser.getTok().getLoc();
3134  if (getParser().ParseExpression(LSBExpr)) {
3135    Error(E, "malformed immediate expression");
3136    return MatchOperand_ParseFail;
3137  }
3138  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3139  if (!CE) {
3140    Error(E, "'lsb' operand must be an immediate");
3141    return MatchOperand_ParseFail;
3142  }
3143
3144  int64_t LSB = CE->getValue();
3145  // The LSB must be in the range [0,31]
3146  if (LSB < 0 || LSB > 31) {
3147    Error(E, "'lsb' operand must be in the range [0,31]");
3148    return MatchOperand_ParseFail;
3149  }
3150  E = Parser.getTok().getLoc();
3151
3152  // Expect another immediate operand.
3153  if (Parser.getTok().isNot(AsmToken::Comma)) {
3154    Error(Parser.getTok().getLoc(), "too few operands");
3155    return MatchOperand_ParseFail;
3156  }
3157  Parser.Lex(); // Eat comma token.
3158  if (Parser.getTok().isNot(AsmToken::Hash)) {
3159    Error(Parser.getTok().getLoc(), "'#' expected");
3160    return MatchOperand_ParseFail;
3161  }
3162  Parser.Lex(); // Eat hash token.
3163
3164  const MCExpr *WidthExpr;
3165  if (getParser().ParseExpression(WidthExpr)) {
3166    Error(E, "malformed immediate expression");
3167    return MatchOperand_ParseFail;
3168  }
3169  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3170  if (!CE) {
3171    Error(E, "'width' operand must be an immediate");
3172    return MatchOperand_ParseFail;
3173  }
3174
3175  int64_t Width = CE->getValue();
3176  // The width must be in the range [1,32-lsb]
3177  if (Width < 1 || Width > 32 - LSB) {
3178    Error(E, "'width' operand must be in the range [1,32-lsb]");
3179    return MatchOperand_ParseFail;
3180  }
3181  E = Parser.getTok().getLoc();
3182
3183  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3184
3185  return MatchOperand_Success;
3186}
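
// Editor's note: illustrative inputs for the '#lsb, #width' pair parsed
// above (not from the original source; standard UAL syntax assumed):
//   bfi  r0, r1, #4, #8     @ lsb = 4, width = 8
//   sbfx r2, r3, #0, #16    @ lsb = 0, width = 16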
3187
3188ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3189parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3190  // Check for a post-index addressing register operand. Specifically:
3191  // postidx_reg := '+' register {, shift}
3192  //              | '-' register {, shift}
3193  //              | register {, shift}
3194
3195  // This method must return MatchOperand_NoMatch without consuming any tokens
3196  // in the case where there is no match, since other parse methods will
3197  // then be tried for the remaining alternatives.
3198  AsmToken Tok = Parser.getTok();
3199  SMLoc S = Tok.getLoc();
3200  bool haveEaten = false;
3201  bool isAdd = true;
3202  int Reg = -1;
3203  if (Tok.is(AsmToken::Plus)) {
3204    Parser.Lex(); // Eat the '+' token.
3205    haveEaten = true;
3206  } else if (Tok.is(AsmToken::Minus)) {
3207    Parser.Lex(); // Eat the '-' token.
3208    isAdd = false;
3209    haveEaten = true;
3210  }
3211  if (Parser.getTok().is(AsmToken::Identifier))
3212    Reg = tryParseRegister();
3213  if (Reg == -1) {
3214    if (!haveEaten)
3215      return MatchOperand_NoMatch;
3216    Error(Parser.getTok().getLoc(), "register expected");
3217    return MatchOperand_ParseFail;
3218  }
3219  SMLoc E = Parser.getTok().getLoc();
3220
3221  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3222  unsigned ShiftImm = 0;
3223  if (Parser.getTok().is(AsmToken::Comma)) {
3224    Parser.Lex(); // Eat the ','.
3225    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3226      return MatchOperand_ParseFail;
3227  }
3228
3229  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3230                                                  ShiftImm, S, E));
3231
3232  return MatchOperand_Success;
3233}
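
// Editor's note: illustrative post-indexed forms whose offset register is
// parsed above (not from the original source; standard UAL syntax assumed):
//   ldr r0, [r1], r2              @ add, no shift
//   ldr r0, [r1], -r2, lsl #2     @ subtract, shifted offset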
3234
3235ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3236parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3237  // Check for a post-index addressing register operand. Specifically:
3238  // am3offset := '+' register
3239  //              | '-' register
3240  //              | register
3241  //              | # imm
3242  //              | # + imm
3243  //              | # - imm
3244
3245  // This method must return MatchOperand_NoMatch without consuming any tokens
3246  // in the case where there is no match, since other parse methods will
3247  // then be tried for the remaining alternatives.
3248  AsmToken Tok = Parser.getTok();
3249  SMLoc S = Tok.getLoc();
3250
3251  // Do immediates first, as we always parse those if we have a '#'.
3252  if (Parser.getTok().is(AsmToken::Hash)) {
3253    Parser.Lex(); // Eat the '#'.
3254    // Explicitly look for a '-', as we need to encode negative zero
3255    // differently.
3256    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3257    const MCExpr *Offset;
3258    if (getParser().ParseExpression(Offset))
3259      return MatchOperand_ParseFail;
3260    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3261    if (!CE) {
3262      Error(S, "constant expression expected");
3263      return MatchOperand_ParseFail;
3264    }
3265    SMLoc E = Tok.getLoc();
3266    // Negative zero is encoded as the flag value INT32_MIN.
3267    int32_t Val = CE->getValue();
3268    if (isNegative && Val == 0)
3269      Val = INT32_MIN;
3270
3271    Operands.push_back(
3272      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3273
3274    return MatchOperand_Success;
3275  }
3276
3277
3278  bool haveEaten = false;
3279  bool isAdd = true;
3280  int Reg = -1;
3281  if (Tok.is(AsmToken::Plus)) {
3282    Parser.Lex(); // Eat the '+' token.
3283    haveEaten = true;
3284  } else if (Tok.is(AsmToken::Minus)) {
3285    Parser.Lex(); // Eat the '-' token.
3286    isAdd = false;
3287    haveEaten = true;
3288  }
3289  if (Parser.getTok().is(AsmToken::Identifier))
3290    Reg = tryParseRegister();
3291  if (Reg == -1) {
3292    if (!haveEaten)
3293      return MatchOperand_NoMatch;
3294    Error(Parser.getTok().getLoc(), "register expected");
3295    return MatchOperand_ParseFail;
3296  }
3297  SMLoc E = Parser.getTok().getLoc();
3298
3299  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3300                                                  0, S, E));
3301
3302  return MatchOperand_Success;
3303}
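
// Editor's note: illustrative addrmode3 post-indexed forms whose offset is
// parsed above (not from the original source; standard UAL syntax assumed):
//   ldrh r0, [r1], #4     @ immediate offset
//   ldrh r0, [r1], -r2    @ register offset, subtract
//   strh r0, [r1], #-0    @ negative zero, carried as INT32_MIN internally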
3304
3305/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3306/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3307/// when they refer to multiple MIOperands inside a single one.
3308bool ARMAsmParser::
3309cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3310             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3311  // Rt, Rt2
3312  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3313  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3314  // Create a writeback register dummy placeholder.
3315  Inst.addOperand(MCOperand::CreateReg(0));
3316  // addr
3317  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3318  // pred
3319  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3320  return true;
3321}
3322
3323/// cvtT2StrdPre - Convert parsed operands to MCInst.
3324/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3325/// when they refer to multiple MIOperands inside a single one.
3326bool ARMAsmParser::
3327cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3328             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3329  // Create a writeback register dummy placeholder.
3330  Inst.addOperand(MCOperand::CreateReg(0));
3331  // Rt, Rt2
3332  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3333  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3334  // addr
3335  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3336  // pred
3337  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3338  return true;
3339}
3340
3341/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3342/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3343/// when they refer to multiple MIOperands inside a single one.
3344bool ARMAsmParser::
3345cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3346                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3347  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3348
3349  // Create a writeback register dummy placeholder.
3350  Inst.addOperand(MCOperand::CreateImm(0));
3351
3352  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3353  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3354  return true;
3355}
3356
3357/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3358/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3359/// when they refer to multiple MIOperands inside a single one.
3360bool ARMAsmParser::
3361cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3362                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3363  // Create a writeback register dummy placeholder.
3364  Inst.addOperand(MCOperand::CreateImm(0));
3365  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3366  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3367  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3368  return true;
3369}
3370
3371/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3372/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3373/// when they refer to multiple MIOperands inside a single one.
3374bool ARMAsmParser::
3375cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3376                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3377  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3378
3379  // Create a writeback register dummy placeholder.
3380  Inst.addOperand(MCOperand::CreateImm(0));
3381
3382  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3383  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3384  return true;
3385}
3386
3387/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3388/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3389/// when they refer to multiple MIOperands inside a single one.
3390bool ARMAsmParser::
3391cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3392                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3393  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3394
3395  // Create a writeback register dummy placeholder.
3396  Inst.addOperand(MCOperand::CreateImm(0));
3397
3398  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3399  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3400  return true;
3401}
3402
3403
3404/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3405/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3406/// when they refer to multiple MIOperands inside a single one.
3407bool ARMAsmParser::
3408cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3409                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3410  // Create a writeback register dummy placeholder.
3411  Inst.addOperand(MCOperand::CreateImm(0));
3412  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3413  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3414  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3415  return true;
3416}
3417
3418/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3419/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3420/// when they refer to multiple MIOperands inside a single one.
3421bool ARMAsmParser::
3422cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3423                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3424  // Create a writeback register dummy placeholder.
3425  Inst.addOperand(MCOperand::CreateImm(0));
3426  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3427  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3428  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3429  return true;
3430}
3431
3432/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3433/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3434/// when they refer to multiple MIOperands inside a single one.
3435bool ARMAsmParser::
3436cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3437                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3438  // Create a writeback register dummy placeholder.
3439  Inst.addOperand(MCOperand::CreateImm(0));
3440  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3441  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3442  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3443  return true;
3444}
3445
3446/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3447/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3448/// when they refer to multiple MIOperands inside a single one.
3449bool ARMAsmParser::
3450cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3451                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3452  // Rt
3453  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3454  // Create a writeback register dummy placeholder.
3455  Inst.addOperand(MCOperand::CreateImm(0));
3456  // addr
3457  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3458  // offset
3459  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3460  // pred
3461  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3462  return true;
3463}
3464
3465/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3466/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3467/// when they refer to multiple MIOperands inside a single one.
3468bool ARMAsmParser::
3469cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3470                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3471  // Rt
3472  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3473  // Create a writeback register dummy placeholder.
3474  Inst.addOperand(MCOperand::CreateImm(0));
3475  // addr
3476  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3477  // offset
3478  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3479  // pred
3480  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3481  return true;
3482}
3483
3484/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3485/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3486/// when they refer to multiple MIOperands inside a single one.
3487bool ARMAsmParser::
3488cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3489                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3490  // Create a writeback register dummy placeholder.
3491  Inst.addOperand(MCOperand::CreateImm(0));
3492  // Rt
3493  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3494  // addr
3495  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3496  // offset
3497  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3498  // pred
3499  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3500  return true;
3501}
3502
3503/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3504/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3505/// when they refer to multiple MIOperands inside a single one.
3506bool ARMAsmParser::
3507cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3508                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3509  // Create a writeback register dummy placeholder.
3510  Inst.addOperand(MCOperand::CreateImm(0));
3511  // Rt
3512  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3513  // addr
3514  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3515  // offset
3516  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3517  // pred
3518  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3519  return true;
3520}
3521
3522/// cvtLdrdPre - Convert parsed operands to MCInst.
3523/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3524/// when they refer to multiple MIOperands inside a single one.
3525bool ARMAsmParser::
3526cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3527           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3528  // Rt, Rt2
3529  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3530  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3531  // Create a writeback register dummy placeholder.
3532  Inst.addOperand(MCOperand::CreateImm(0));
3533  // addr
3534  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3535  // pred
3536  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3537  return true;
3538}
3539
3540/// cvtStrdPre - Convert parsed operands to MCInst.
3541/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3542/// when they refer to multiple MIOperands inside a single one.
3543bool ARMAsmParser::
3544cvtStrdPre(MCInst &Inst, unsigned Opcode,
3545           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3546  // Create a writeback register dummy placeholder.
3547  Inst.addOperand(MCOperand::CreateImm(0));
3548  // Rt, Rt2
3549  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3550  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3551  // addr
3552  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3553  // pred
3554  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3555  return true;
3556}
3557
3558/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3559/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3560/// when they refer to multiple MIOperands inside a single one.
3561bool ARMAsmParser::
3562cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3563                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3564  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3565  // Create a writeback register dummy placeholder.
3566  Inst.addOperand(MCOperand::CreateImm(0));
3567  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3568  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3569  return true;
3570}
3571
3572/// cvtThumbMultiply - Convert parsed operands to MCInst.
3573/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3574/// when they refer to multiple MIOperands inside a single one.
3575bool ARMAsmParser::
3576cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3577           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3578  // The destination register must be the same as one of the source
3579  // registers.
3580  if (Operands.size() == 6 &&
3581      (((ARMOperand*)Operands[3])->getReg() !=
3582       ((ARMOperand*)Operands[5])->getReg()) &&
3583      (((ARMOperand*)Operands[3])->getReg() !=
3584       ((ARMOperand*)Operands[4])->getReg())) {
3585    Error(Operands[3]->getStartLoc(),
3586          "destination register must match source register");
3587    return false;
3588  }
3589  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3590  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3591  // If we have a three-operand form, make sure to set Rn to be the operand
3592  // that isn't the same as Rd.
3593  unsigned RegOp = 4;
3594  if (Operands.size() == 6 &&
3595      ((ARMOperand*)Operands[4])->getReg() ==
3596        ((ARMOperand*)Operands[3])->getReg())
3597    RegOp = 5;
3598  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3599  Inst.addOperand(Inst.getOperand(0));
3600  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3601
3602  return true;
3603}
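
// Editor's note: illustrative Thumb forms handled by the conversion above
// (not from the original source): the 16-bit encoding requires the
// destination to match one of the source registers, e.g.
//   muls r0, r1, r0    @ three-operand form, Rd == Rm
//   muls r2, r3        @ two-operand form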
3604
3605bool ARMAsmParser::
3606cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3607              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3608  // Vd
3609  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3610  // Create a writeback register dummy placeholder.
3611  Inst.addOperand(MCOperand::CreateImm(0));
3612  // Vn
3613  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3614  // pred
3615  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3616  return true;
3617}
3618
3619bool ARMAsmParser::
3620cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3621                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3622  // Vd
3623  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3624  // Create a writeback register dummy placeholder.
3625  Inst.addOperand(MCOperand::CreateImm(0));
3626  // Vn
3627  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3628  // Vm
3629  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3630  // pred
3631  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3632  return true;
3633}
3634
3635bool ARMAsmParser::
3636cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3637              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3638  // Create a writeback register dummy placeholder.
3639  Inst.addOperand(MCOperand::CreateImm(0));
3640  // Vn
3641  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3642  // Vt
3643  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3644  // pred
3645  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3646  return true;
3647}
3648
3649bool ARMAsmParser::
3650cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3651                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3652  // Create a writeback register dummy placeholder.
3653  Inst.addOperand(MCOperand::CreateImm(0));
3654  // Vn
3655  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3656  // Vm
3657  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3658  // Vt
3659  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3660  // pred
3661  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3662  return true;
3663}
3664
3665/// Parse an ARM memory expression. Returns false if successful; otherwise
3666/// returns true and reports an error.  The first token must be a '[' when called.
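/// Illustrative memory operands accepted by this parser (editorial examples,
/// standard UAL syntax assumed):
///   [r0]                @ no offset
///   [r0, #-8]!          @ immediate offset with writeback '!'
///   [r0, r1, lsl #2]    @ register offset with shift
///   [r0, :128]          @ alignment specifier (64, 128 or 256 bits)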
3667bool ARMAsmParser::
3668parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3669  SMLoc S, E;
3670  assert(Parser.getTok().is(AsmToken::LBrac) &&
3671         "Token is not a Left Bracket");
3672  S = Parser.getTok().getLoc();
3673  Parser.Lex(); // Eat left bracket token.
3674
3675  const AsmToken &BaseRegTok = Parser.getTok();
3676  int BaseRegNum = tryParseRegister();
3677  if (BaseRegNum == -1)
3678    return Error(BaseRegTok.getLoc(), "register expected");
3679
3680  // The next token must either be a comma or a closing bracket.
3681  const AsmToken &Tok = Parser.getTok();
3682  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3683    return Error(Tok.getLoc(), "malformed memory operand");
3684
3685  if (Tok.is(AsmToken::RBrac)) {
3686    E = Tok.getLoc();
3687    Parser.Lex(); // Eat right bracket token.
3688
3689    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3690                                             0, 0, false, S, E));
3691
3692    // If there's a pre-indexing writeback marker, '!', just add it as a token
3693    // operand. It's rather odd, but syntactically valid.
3694    if (Parser.getTok().is(AsmToken::Exclaim)) {
3695      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3696      Parser.Lex(); // Eat the '!'.
3697    }
3698
3699    return false;
3700  }
3701
3702  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
3703  Parser.Lex(); // Eat the comma.
3704
3705  // If we have a ':', it's an alignment specifier.
3706  if (Parser.getTok().is(AsmToken::Colon)) {
3707    Parser.Lex(); // Eat the ':'.
3708    E = Parser.getTok().getLoc();
3709
3710    const MCExpr *Expr;
3711    if (getParser().ParseExpression(Expr))
3712     return true;
3713
3714    // The expression has to be a constant. Memory references with relocations
3715    // don't come through here, as they use the <label> forms of the relevant
3716    // instructions.
3717    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3718    if (!CE)
3719      return Error (E, "constant expression expected");
3720
3721    unsigned Align = 0;
3722    switch (CE->getValue()) {
3723    default:
3724      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
3725    case 64:  Align = 8; break;
3726    case 128: Align = 16; break;
3727    case 256: Align = 32; break;
3728    }
3729
3730    // Now we should have the closing ']'
3731    E = Parser.getTok().getLoc();
3732    if (Parser.getTok().isNot(AsmToken::RBrac))
3733      return Error(E, "']' expected");
3734    Parser.Lex(); // Eat right bracket token.
3735
3736    // Don't worry about range checking the value here. That's handled by
3737    // the is*() predicates.
3738    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
3739                                             ARM_AM::no_shift, 0, Align,
3740                                             false, S, E));
3741
3742    // If there's a pre-indexing writeback marker, '!', just add it as a token
3743    // operand.
3744    if (Parser.getTok().is(AsmToken::Exclaim)) {
3745      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3746      Parser.Lex(); // Eat the '!'.
3747    }
3748
3749    return false;
3750  }
3751
3752  // If we have a '#', it's an immediate offset, else assume it's a register
3753  // offset. Be friendly and also accept a plain integer (without a leading
3754  // hash) for gas compatibility.
3755  if (Parser.getTok().is(AsmToken::Hash) ||
3756      Parser.getTok().is(AsmToken::Integer)) {
3757    if (Parser.getTok().is(AsmToken::Hash))
3758      Parser.Lex(); // Eat the '#'.
3759    E = Parser.getTok().getLoc();
3760
3761    bool isNegative = getParser().getTok().is(AsmToken::Minus);
3762    const MCExpr *Offset;
3763    if (getParser().ParseExpression(Offset))
3764     return true;
3765
3766    // The expression has to be a constant. Memory references with relocations
3767    // don't come through here, as they use the <label> forms of the relevant
3768    // instructions.
3769    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3770    if (!CE)
3771      return Error (E, "constant expression expected");
3772
3773    // If the constant was #-0, represent it as INT32_MIN.
3774    int32_t Val = CE->getValue();
3775    if (isNegative && Val == 0)
3776      CE = MCConstantExpr::Create(INT32_MIN, getContext());
3777
3778    // Now we should have the closing ']'
3779    E = Parser.getTok().getLoc();
3780    if (Parser.getTok().isNot(AsmToken::RBrac))
3781      return Error(E, "']' expected");
3782    Parser.Lex(); // Eat right bracket token.
3783
3784    // Don't worry about range checking the value here. That's handled by
3785    // the is*() predicates.
3786    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
3787                                             ARM_AM::no_shift, 0, 0,
3788                                             false, S, E));
3789
3790    // If there's a pre-indexing writeback marker, '!', just add it as a token
3791    // operand.
3792    if (Parser.getTok().is(AsmToken::Exclaim)) {
3793      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3794      Parser.Lex(); // Eat the '!'.
3795    }
3796
3797    return false;
3798  }
3799
3800  // The register offset is optionally preceded by a '+' or '-'
3801  bool isNegative = false;
3802  if (Parser.getTok().is(AsmToken::Minus)) {
3803    isNegative = true;
3804    Parser.Lex(); // Eat the '-'.
3805  } else if (Parser.getTok().is(AsmToken::Plus)) {
3806    // Nothing to do.
3807    Parser.Lex(); // Eat the '+'.
3808  }
3809
3810  E = Parser.getTok().getLoc();
3811  int OffsetRegNum = tryParseRegister();
3812  if (OffsetRegNum == -1)
3813    return Error(E, "register expected");
3814
3815  // If there's a shift operator, handle it.
3816  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
3817  unsigned ShiftImm = 0;
3818  if (Parser.getTok().is(AsmToken::Comma)) {
3819    Parser.Lex(); // Eat the ','.
3820    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
3821      return true;
3822  }
3823
3824  // Now we should have the closing ']'
3825  E = Parser.getTok().getLoc();
3826  if (Parser.getTok().isNot(AsmToken::RBrac))
3827    return Error(E, "']' expected");
3828  Parser.Lex(); // Eat right bracket token.
3829
3830  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
3831                                           ShiftType, ShiftImm, 0, isNegative,
3832                                           S, E));
3833
3834  // If there's a pre-indexing writeback marker, '!', just add it as a token
3835  // operand.
3836  if (Parser.getTok().is(AsmToken::Exclaim)) {
3837    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3838    Parser.Lex(); // Eat the '!'.
3839  }
3840
3841  return false;
3842}
3843
3844/// parseMemRegOffsetShift - one of these two:
3845///   ( lsl | lsr | asr | ror ) , # shift_amount
3846///   rrx
3847/// Returns false if a shift was parsed successfully, true otherwise.
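/// Illustrative offsets containing the shift parsed here (editorial
/// examples, standard UAL syntax assumed):
///   ldr r0, [r1, r2, lsl #2]
///   ldr r0, [r1, -r2, rrx]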
3848bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
3849                                          unsigned &Amount) {
3850  SMLoc Loc = Parser.getTok().getLoc();
3851  const AsmToken &Tok = Parser.getTok();
3852  if (Tok.isNot(AsmToken::Identifier))
3853    return true;
3854  StringRef ShiftName = Tok.getString();
3855  if (ShiftName == "lsl" || ShiftName == "LSL")
3856    St = ARM_AM::lsl;
3857  else if (ShiftName == "lsr" || ShiftName == "LSR")
3858    St = ARM_AM::lsr;
3859  else if (ShiftName == "asr" || ShiftName == "ASR")
3860    St = ARM_AM::asr;
3861  else if (ShiftName == "ror" || ShiftName == "ROR")
3862    St = ARM_AM::ror;
3863  else if (ShiftName == "rrx" || ShiftName == "RRX")
3864    St = ARM_AM::rrx;
3865  else
3866    return Error(Loc, "illegal shift operator");
3867  Parser.Lex(); // Eat shift type token.
3868
3869  // rrx stands alone.
3870  Amount = 0;
3871  if (St != ARM_AM::rrx) {
3872    Loc = Parser.getTok().getLoc();
3873    // A '#' and a shift amount.
3874    const AsmToken &HashTok = Parser.getTok();
3875    if (HashTok.isNot(AsmToken::Hash))
3876      return Error(HashTok.getLoc(), "'#' expected");
3877    Parser.Lex(); // Eat hash token.
3878
3879    const MCExpr *Expr;
3880    if (getParser().ParseExpression(Expr))
3881      return true;
3882    // Range check the immediate.
3883    // lsl, ror: 0 <= imm <= 31
3884    // lsr, asr: 0 <= imm <= 32
3885    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3886    if (!CE)
3887      return Error(Loc, "shift amount must be an immediate");
3888    int64_t Imm = CE->getValue();
3889    if (Imm < 0 ||
3890        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
3891        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
3892      return Error(Loc, "immediate shift value out of range");
3893    Amount = Imm;
3894  }
3895
3896  return false;
3897}
3898
3899/// parseFPImm - A floating point immediate expression operand.
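/// Illustrative operands (editorial examples, standard UAL syntax assumed):
///   vmov.f32 s0, #1.0
///   vmov.f64 d0, #-2.5
///   vmov.f32 d1, #0.5      @ vector f32x2 form
/// An integer in [0,255] is also accepted as an already-encoded value.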
3900ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3901parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3902  SMLoc S = Parser.getTok().getLoc();
3903
3904  if (Parser.getTok().isNot(AsmToken::Hash))
3905    return MatchOperand_NoMatch;
3906
3907  // Disambiguate the VMOV forms that can accept an FP immediate.
3908  // vmov.f32 <sreg>, #imm
3909  // vmov.f64 <dreg>, #imm
3910  // vmov.f32 <dreg>, #imm  @ vector f32x2
3911  // vmov.f32 <qreg>, #imm  @ vector f32x4
3912  //
3913  // There are also the NEON VMOV instructions which expect an
3914  // integer constant. Make sure we don't try to parse an FPImm
3915  // for these:
3916  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
3917  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
3918  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
3919                           TyOp->getToken() != ".f64"))
3920    return MatchOperand_NoMatch;
3921
3922  Parser.Lex(); // Eat the '#'.
3923
3924  // Handle negation, as that still comes through as a separate token.
3925  bool isNegative = false;
3926  if (Parser.getTok().is(AsmToken::Minus)) {
3927    isNegative = true;
3928    Parser.Lex();
3929  }
3930  const AsmToken &Tok = Parser.getTok();
3931  if (Tok.is(AsmToken::Real)) {
3932    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3933    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3934    // If we had a '-' in front, toggle the sign bit.
3935    IntVal ^= (uint64_t)isNegative << 63;
3936    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
3937    Parser.Lex(); // Eat the token.
3938    if (Val == -1) {
3939      TokError("floating point value out of range");
3940      return MatchOperand_ParseFail;
3941    }
3942    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
3943    return MatchOperand_Success;
3944  }
3945  if (Tok.is(AsmToken::Integer)) {
3946    int64_t Val = Tok.getIntVal();
3947    Parser.Lex(); // Eat the token.
3948    if (Val > 255 || Val < 0) {
3949      TokError("encoded floating point value out of range");
3950      return MatchOperand_ParseFail;
3951    }
3952    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
3953    return MatchOperand_Success;
3954  }
3955
3956  TokError("invalid floating point immediate");
3957  return MatchOperand_ParseFail;
3958}
3959/// Parse an ARM instruction operand.  For now this parses the operand regardless
3960/// of the mnemonic.
3961bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
3962                                StringRef Mnemonic) {
3963  SMLoc S, E;
3964
3965  // Check if the current operand has a custom associated parser, if so, try to
3966  // custom parse the operand, or fallback to the general approach.
3967  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3968  if (ResTy == MatchOperand_Success)
3969    return false;
3970  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3971  // there was a match, but an error occurred, in which case, just return that
3972  // the operand parsing failed.
3973  if (ResTy == MatchOperand_ParseFail)
3974    return true;
3975
3976  switch (getLexer().getKind()) {
3977  default:
3978    Error(Parser.getTok().getLoc(), "unexpected token in operand");
3979    return true;
3980  case AsmToken::Identifier: {
3981    // If this is VMRS, check for the apsr_nzcv operand.
3982    if (!tryParseRegisterWithWriteBack(Operands))
3983      return false;
3984    int Res = tryParseShiftRegister(Operands);
3985    if (Res == 0) // success
3986      return false;
3987    else if (Res == -1) // irrecoverable error
3988      return true;
3989    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
3990      S = Parser.getTok().getLoc();
3991      Parser.Lex();
3992      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
3993      return false;
3994    }
3995
3996    // Fall through for the Identifier case that is not a register or a
3997    // special name.
3998  }
3999  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
4000  case AsmToken::Integer: // things like 1f and 2b as branch targets
4001  case AsmToken::String:  // quoted label names.
4002  case AsmToken::Dot: {   // . as a branch target
4003    // This was not a register so parse other operands that start with an
4004    // identifier (like labels) as expressions and create them as immediates.
4005    const MCExpr *IdVal;
4006    S = Parser.getTok().getLoc();
4007    if (getParser().ParseExpression(IdVal))
4008      return true;
4009    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4010    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
4011    return false;
4012  }
4013  case AsmToken::LBrac:
4014    return parseMemory(Operands);
4015  case AsmToken::LCurly:
4016    return parseRegisterList(Operands);
4017  case AsmToken::Hash: {
4018    // #42 -> immediate.
4019    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4020    S = Parser.getTok().getLoc();
4021    Parser.Lex();
4022    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4023    const MCExpr *ImmVal;
4024    if (getParser().ParseExpression(ImmVal))
4025      return true;
4026    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4027    if (CE) {
4028      int32_t Val = CE->getValue();
4029      if (isNegative && Val == 0)
4030        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4031    }
4032    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4033    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4034    return false;
4035  }
4036  case AsmToken::Colon: {
4037    // ":lower16:" and ":upper16:" expression prefixes
4038    // FIXME: Check it's an expression prefix,
4039    // e.g. (FOO - :lower16:BAR) isn't legal.
4040    ARMMCExpr::VariantKind RefKind;
4041    if (parsePrefix(RefKind))
4042      return true;
4043
4044    const MCExpr *SubExprVal;
4045    if (getParser().ParseExpression(SubExprVal))
4046      return true;
4047
4048    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4049                                                   getContext());
4050    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4051    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4052    return false;
4053  }
4054  }
4055}
4056
4057// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4058//  :lower16: and :upper16:.
4059bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4060  RefKind = ARMMCExpr::VK_ARM_None;
4061
4062  // :lower16: and :upper16: modifiers
4063  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4064  Parser.Lex(); // Eat ':'
4065
4066  if (getLexer().isNot(AsmToken::Identifier)) {
4067    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4068    return true;
4069  }
4070
4071  StringRef IDVal = Parser.getTok().getIdentifier();
4072  if (IDVal == "lower16") {
4073    RefKind = ARMMCExpr::VK_ARM_LO16;
4074  } else if (IDVal == "upper16") {
4075    RefKind = ARMMCExpr::VK_ARM_HI16;
4076  } else {
4077    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4078    return true;
4079  }
4080  Parser.Lex();
4081
4082  if (getLexer().isNot(AsmToken::Colon)) {
4083    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4084    return true;
4085  }
4086  Parser.Lex(); // Eat the last ':'
4087  return false;
4088}
4089
4090/// \brief Given a mnemonic, split out possible predication code and carry
4091/// setting letters to form a canonical mnemonic and flags.
4092//
4093// FIXME: Would be nice to autogen this.
4094// FIXME: This is a bit of a maze of special cases.
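// Illustrative splits (editorial examples): "addseq" -> "add" + 's' + EQ,
// "cpsie" -> "cps" + imod IE, "ittet" -> "it" + mask "tet".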
4095StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4096                                      unsigned &PredicationCode,
4097                                      bool &CarrySetting,
4098                                      unsigned &ProcessorIMod,
4099                                      StringRef &ITMask) {
4100  PredicationCode = ARMCC::AL;
4101  CarrySetting = false;
4102  ProcessorIMod = 0;
4103
4104  // Ignore some mnemonics we know aren't predicated forms.
4105  //
4106  // FIXME: Would be nice to autogen this.
4107  if ((Mnemonic == "movs" && isThumb()) ||
4108      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4109      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4110      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4111      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4112      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4113      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4114      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
4115    return Mnemonic;
4116
4117  // First, split out any predication code. Ignore mnemonics we know aren't
4118  // predicated but do have a carry-set and so weren't caught above.
4119  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4120      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4121      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4122      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4123    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4124      .Case("eq", ARMCC::EQ)
4125      .Case("ne", ARMCC::NE)
4126      .Case("hs", ARMCC::HS)
4127      .Case("cs", ARMCC::HS)
4128      .Case("lo", ARMCC::LO)
4129      .Case("cc", ARMCC::LO)
4130      .Case("mi", ARMCC::MI)
4131      .Case("pl", ARMCC::PL)
4132      .Case("vs", ARMCC::VS)
4133      .Case("vc", ARMCC::VC)
4134      .Case("hi", ARMCC::HI)
4135      .Case("ls", ARMCC::LS)
4136      .Case("ge", ARMCC::GE)
4137      .Case("lt", ARMCC::LT)
4138      .Case("gt", ARMCC::GT)
4139      .Case("le", ARMCC::LE)
4140      .Case("al", ARMCC::AL)
4141      .Default(~0U);
4142    if (CC != ~0U) {
4143      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4144      PredicationCode = CC;
4145    }
4146  }
4147
4148  // Next, determine if we have a carry setting bit. We explicitly ignore all
4149  // the instructions we know end in 's'.
4150  if (Mnemonic.endswith("s") &&
4151      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4152        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4153        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4154        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4155        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
4156        (Mnemonic == "movs" && isThumb()))) {
4157    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4158    CarrySetting = true;
4159  }
4160
4161  // The "cps" instruction can have a interrupt mode operand which is glued into
4162  // the mnemonic. Check if this is the case, split it and parse the imod op
4163  if (Mnemonic.startswith("cps")) {
4164    // Split out any imod code.
4165    unsigned IMod =
4166      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4167      .Case("ie", ARM_PROC::IE)
4168      .Case("id", ARM_PROC::ID)
4169      .Default(~0U);
4170    if (IMod != ~0U) {
4171      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4172      ProcessorIMod = IMod;
4173    }
4174  }
4175
4176  // The "it" instruction has the condition mask on the end of the mnemonic.
4177  if (Mnemonic.startswith("it")) {
4178    ITMask = Mnemonic.slice(2, Mnemonic.size());
4179    Mnemonic = Mnemonic.slice(0, 2);
4180  }
4181
4182  return Mnemonic;
4183}
4184
4185/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4186/// inclusion of carry set or predication code operands.
4187//
4188// FIXME: It would be nice to autogen this.
4189void ARMAsmParser::
4190getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4191                      bool &CanAcceptPredicationCode) {
4192  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4193      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4194      Mnemonic == "add" || Mnemonic == "adc" ||
4195      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4196      Mnemonic == "orr" || Mnemonic == "mvn" ||
4197      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4198      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4199      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4200                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4201                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4202    CanAcceptCarrySet = true;
4203  } else
4204    CanAcceptCarrySet = false;
4205
4206  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4207      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4208      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4209      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4210      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4211      (Mnemonic == "clrex" && !isThumb()) ||
4212      (Mnemonic == "nop" && isThumbOne()) ||
4213      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4214        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4215        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4216      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4217       !isThumb()) ||
4218      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4219    CanAcceptPredicationCode = false;
4220  } else
4221    CanAcceptPredicationCode = true;
4222
4223  if (isThumb()) {
4224    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4225        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4226      CanAcceptPredicationCode = false;
4227  }
4228}
4229
4230bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4231                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4232  // FIXME: This is all horribly hacky. We really need a better way to deal
4233  // with optional operands like this in the matcher table.
4234
4235  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4236  // another does not. Specifically, the MOVW instruction does not. So we
4237  // special case it here and remove the defaulted (non-setting) cc_out
4238  // operand if that's the instruction we're trying to match.
4239  //
4240  // We do this as post-processing of the explicit operands rather than just
4241  // conditionally adding the cc_out in the first place because we need
4242  // to check the type of the parsed immediate operand.
4243  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4244      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4245      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4246      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4247    return true;
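
  // Editorial example (illustrative): "mov r9, #0x1234" can only encode as
  // MOVW (0x1234 is not a modified immediate), so the cc_out is dropped,
  // whereas "mov r9, #0xff00" is a valid modified immediate and keeps it.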
4248
4249  // Register-register 'add' for thumb does not have a cc_out operand
4250  // when there are only two register operands.
4251  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4252      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4253      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4254      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4255    return true;
4256  // Register-register 'add' for thumb does not have a cc_out operand
4257  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4258  // have to check the immediate range here since Thumb2 has a variant
4259  // that can handle a different range and has a cc_out operand.
4260  if (((isThumb() && Mnemonic == "add") ||
4261       (isThumbTwo() && Mnemonic == "sub")) &&
4262      Operands.size() == 6 &&
4263      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4264      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4265      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4266      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4267      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4268       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4269    return true;
4270  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4271  // imm0_4095 variant. That's the least-preferred variant when
4272  // selecting via the generic "add" mnemonic, so to know that we
4273  // should remove the cc_out operand, we have to explicitly check that
4274  // it's not one of the other variants. Ugh.
4275  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4276      Operands.size() == 6 &&
4277      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4278      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4279      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4280    // Nest conditions rather than one big 'if' statement for readability.
4281    //
4282    // If either register is a high reg, it's either one of the SP
4283    // variants (handled above) or a 32-bit encoding, so we just
4284    // check against T3.
4285    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4286         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4287        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4288      return false;
4289    // If both registers are low, we're in an IT block, and the immediate is
4290    // in range, we should use encoding T1 instead, which has a cc_out.
4291    if (inITBlock() &&
4292        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4293        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4294        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4295      return false;
4296
4297    // Otherwise, we use encoding T4, which does not have a cc_out
4298    // operand.
4299    return true;
4300  }
4301
4302  // The thumb2 multiply instruction doesn't have a CCOut register, so
4303  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4304  // use the 16-bit encoding or not.
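  // For example, inside an IT block "mul r2, r3, r2" can use the 16-bit
  // encoding and keeps the cc_out, while "mul r2, r3, r4" needs the 32-bit
  // t2MUL, which has none.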
4305  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4306      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4307      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4308      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4309      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4310      // If the registers aren't low regs, the destination reg isn't the
4311      // same as one of the source regs, or the cc_out operand is zero
4312      // outside of an IT block, we have to use the 32-bit encoding, so
4313      // remove the cc_out operand.
4314      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4315       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4316       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4317       !inITBlock() ||
4318       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4319        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4320        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4321        static_cast<ARMOperand*>(Operands[4])->getReg())))
4322    return true;
4323
4324  // Also check the 'mul' syntax variant that doesn't specify an explicit
4325  // destination register.
4326  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4327      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4328      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4329      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4330      // If the registers aren't low regs or the cc_out operand is zero
4331      // outside of an IT block, we have to use the 32-bit encoding, so
4332      // remove the cc_out operand.
4333      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4334       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4335       !inITBlock()))
4336    return true;
4337
4338
4339
4340  // Thumb 'add/sub' with SP as the destination (ADD/SUB SP, #imm) does not
4341  // have a cc_out operand. Be lenient on count since there's also
4342  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4343  // right, this will result in better diagnostics (which operand is off)
4344  // anyway.
4345  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4346      (Operands.size() == 5 || Operands.size() == 6) &&
4347      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4348      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4349      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4350    return true;
4351
4352  return false;
4353}
4354
4355static bool isDataTypeToken(StringRef Tok) {
4356  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4357    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4358    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4359    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4360    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4361    Tok == ".f" || Tok == ".d";
4362}
4363
4364// FIXME: This bit should probably be handled via an explicit match class
4365// in the .td files that matches the suffix instead of having it be
4366// a literal string token the way it is now.
4367static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4368  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4369}
4370
4371/// Parse an arm instruction mnemonic followed by its operands.
4372bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4373                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4374  // Create the leading tokens for the mnemonic, split by '.' characters.
4375  size_t Start = 0, Next = Name.find('.');
4376  StringRef Mnemonic = Name.slice(Start, Next);
4377
4378  // Split out the predication code and carry setting flag from the mnemonic.
4379  unsigned PredicationCode;
4380  unsigned ProcessorIMod;
4381  bool CarrySetting;
4382  StringRef ITMask;
4383  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4384                           ProcessorIMod, ITMask);
4385
4386  // In Thumb1, only the branch (B) instruction can be predicated.
4387  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4388    Parser.EatToEndOfStatement();
4389    return Error(NameLoc, "conditional execution not supported in Thumb1");
4390  }
4391
4392  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4393
4394  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4395  // is the mask as it will be for the IT encoding if the conditional
4396  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4397  // where the conditional bit0 is zero, the instruction post-processing
4398  // will adjust the mask accordingly.
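  // For example, "itte" yields ITMask == "te", and the loop below builds
  // Mask == 0b1010: bit 3 set for the 't', bit 2 clear for the 'e', and
  // bit 1 set as the terminating marker.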
4399  if (Mnemonic == "it") {
4400    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4401    if (ITMask.size() > 3) {
4402      Parser.EatToEndOfStatement();
4403      return Error(Loc, "too many conditions on IT instruction");
4404    }
4405    unsigned Mask = 8;
4406    for (unsigned i = ITMask.size(); i != 0; --i) {
4407      char pos = ITMask[i - 1];
4408      if (pos != 't' && pos != 'e') {
4409        Parser.EatToEndOfStatement();
4410        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4411      }
4412      Mask >>= 1;
4413      if (ITMask[i - 1] == 't')
4414        Mask |= 8;
4415    }
4416    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4417  }
4418
4419  // FIXME: This is all a pretty gross hack. We should automatically handle
4420  // optional operands like this via tblgen.
4421
4422  // Next, add the CCOut and ConditionCode operands, if needed.
4423  //
4424  // For mnemonics which can ever incorporate a carry setting bit or predication
4425  // code, our matching model involves us always generating CCOut and
4426  // ConditionCode operands to match the mnemonic "as written" and then we let
4427  // the matcher deal with finding the right instruction or generating an
4428  // appropriate error.
4429  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4430  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4431
4432  // If we had a carry-set on an instruction that can't do that, issue an
4433  // error.
4434  if (!CanAcceptCarrySet && CarrySetting) {
4435    Parser.EatToEndOfStatement();
4436    return Error(NameLoc, "instruction '" + Mnemonic +
4437                 "' can not set flags, but 's' suffix specified");
4438  }
4439  // If we had a predication code on an instruction that can't do that, issue an
4440  // error.
4441  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4442    Parser.EatToEndOfStatement();
4443    return Error(NameLoc, "instruction '" + Mnemonic +
4444                 "' is not predicable, but condition code specified");
4445  }
4446
4447  // Add the carry setting operand, if necessary.
4448  if (CanAcceptCarrySet) {
4449    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4450    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4451                                               Loc));
4452  }
4453
4454  // Add the predication code operand, if necessary.
4455  if (CanAcceptPredicationCode) {
4456    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4457                                      CarrySetting);
4458    Operands.push_back(ARMOperand::CreateCondCode(
4459                         ARMCC::CondCodes(PredicationCode), Loc));
4460  }
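  // For a mnemonic accepting both, the operand list now looks like:
  // Operands[0] = mnemonic token, Operands[1] = cc_out (ARM::CPSR or 0),
  // Operands[2] = condition code, followed by any remaining mnemonic suffix
  // tokens and the explicit operands. The index checks in
  // shouldOmitCCOutOperand() and the cc_out erasure below rely on this layout.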
4461
4462  // Add the processor imod operand, if necessary.
4463  if (ProcessorIMod) {
4464    Operands.push_back(ARMOperand::CreateImm(
4465          MCConstantExpr::Create(ProcessorIMod, getContext()),
4466                                 NameLoc, NameLoc));
4467  }
4468
4469  // Add the remaining tokens in the mnemonic.
4470  while (Next != StringRef::npos) {
4471    Start = Next;
4472    Next = Name.find('.', Start + 1);
4473    StringRef ExtraToken = Name.slice(Start, Next);
4474
4475    // Some NEON instructions have an optional datatype suffix that is
4476    // completely ignored. Check for that.
4477    if (isDataTypeToken(ExtraToken) &&
4478        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4479      continue;
4480
4481    if (ExtraToken != ".n") {
4482      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4483      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4484    }
4485  }
4486
4487  // Read the remaining operands.
4488  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4489    // Read the first operand.
4490    if (parseOperand(Operands, Mnemonic)) {
4491      Parser.EatToEndOfStatement();
4492      return true;
4493    }
4494
4495    while (getLexer().is(AsmToken::Comma)) {
4496      Parser.Lex();  // Eat the comma.
4497
4498      // Parse and remember the operand.
4499      if (parseOperand(Operands, Mnemonic)) {
4500        Parser.EatToEndOfStatement();
4501        return true;
4502      }
4503    }
4504  }
4505
4506  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4507    SMLoc Loc = getLexer().getLoc();
4508    Parser.EatToEndOfStatement();
4509    return Error(Loc, "unexpected token in argument list");
4510  }
4511
4512  Parser.Lex(); // Consume the EndOfStatement
4513
4514  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4515  // do and don't have a cc_out optional-def operand. With some spot-checks
4516  // of the operand list, we can figure out which variant we're trying to
4517  // parse and adjust accordingly before actually matching. We shouldn't ever
4518  // try to remove a cc_out operand that was explicitly set on the
4519  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4520  // table-driven matcher doesn't fit well with the ARM instruction set.
4521  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4522    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4523    Operands.erase(Operands.begin() + 1);
4524    delete Op;
4525  }
4526
4527  // ARM mode 'blx' needs special handling, as the register operand version
4528  // is predicable, but the label operand version is not. So, we can't rely
4529  // on the Mnemonic based checking to correctly figure out when to put
4530  // a k_CondCode operand in the list. If we're trying to match the label
4531  // version, remove the k_CondCode operand here.
4532  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4533      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4534    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4535    Operands.erase(Operands.begin() + 1);
4536    delete Op;
4537  }
4538
4539  // The vector-compare-to-zero instructions have a literal token "#0" at
4540  // the end that comes to here as an immediate operand. Convert it to a
4541  // token to play nicely with the matcher.
4542  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4543      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4544      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4545    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4546    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4547    if (CE && CE->getValue() == 0) {
4548      Operands.erase(Operands.begin() + 5);
4549      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4550      delete Op;
4551    }
4552  }
4553  // VCMP{E} does the same thing, but with a different operand count.
4554  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4555      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4556    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4557    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4558    if (CE && CE->getValue() == 0) {
4559      Operands.erase(Operands.begin() + 4);
4560      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4561      delete Op;
4562    }
4563  }
4564  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4565  // end. Convert it to a token here.
4566  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4567      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4568    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4569    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4570    if (CE && CE->getValue() == 0) {
4571      Operands.erase(Operands.begin() + 5);
4572      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4573      delete Op;
4574    }
4575  }
4576
4577  return false;
4578}
4579
4580// Validate context-sensitive operand constraints.
4581
4582// Return 'true' if the register list contains a register that is neither a
4583// low GPR nor the optionally-allowed HiReg, 'false' otherwise. If Reg is in
4584// the register list, set 'containsReg' to true.
4585static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4586                                 unsigned HiReg, bool &containsReg) {
4587  containsReg = false;
4588  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4589    unsigned OpReg = Inst.getOperand(i).getReg();
4590    if (OpReg == Reg)
4591      containsReg = true;
4592    // Anything other than a low register isn't legal here.
4593    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4594      return true;
4595  }
4596  return false;
4597}
4598
4599// Check if the specified register is in the register list of the inst,
4600// starting at the indicated operand number.
4601static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4602  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4603    unsigned OpReg = Inst.getOperand(i).getReg();
4604    if (OpReg == Reg)
4605      return true;
4606  }
4607  return false;
4608}
4609
4610// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4611// the ARMInsts array) instead. Getting that here requires awkward
4612// API changes, though. Better way?
4613namespace llvm {
4614extern const MCInstrDesc ARMInsts[];
4615}
4616static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4617  return ARMInsts[Opcode];
4618}
4619
4620// FIXME: We would really like to be able to tablegen'erate this.
4621bool ARMAsmParser::
4622validateInstruction(MCInst &Inst,
4623                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4624  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4625  SMLoc Loc = Operands[0]->getStartLoc();
4626  // Check the IT block state first.
4627  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4628  // being allowed in IT blocks, but not being predicable.  It just always
4629  // executes.
4630  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
4631    unsigned bit = 1;
4632    if (ITState.FirstCond)
4633      ITState.FirstCond = false;
4634    else
4635      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
4636    // The instruction must be predicable.
4637    if (!MCID.isPredicable())
4638      return Error(Loc, "instructions in IT block must be predicable");
4639    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
4640    unsigned ITCond = bit ? ITState.Cond :
4641      ARMCC::getOppositeCondition(ITState.Cond);
4642    if (Cond != ITCond) {
4643      // Find the condition code Operand to get its SMLoc information.
4644      SMLoc CondLoc;
4645      for (unsigned i = 1; i < Operands.size(); ++i)
4646        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
4647          CondLoc = Operands[i]->getStartLoc();
4648      return Error(CondLoc, "incorrect condition in IT block; got '" +
4649                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
4650                   "', but expected '" +
4651                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
4652    }
4653  // Check for non-'al' condition codes outside of the IT block.
4654  } else if (isThumbTwo() && MCID.isPredicable() &&
4655             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
4656             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
4657             Inst.getOpcode() != ARM::t2B)
4658    return Error(Loc, "predicated instructions must be in IT block");
4659
4660  switch (Inst.getOpcode()) {
4661  case ARM::LDRD:
4662  case ARM::LDRD_PRE:
4663  case ARM::LDRD_POST:
4664  case ARM::LDREXD: {
4665    // Rt2 must be Rt + 1.
4666    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4667    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4668    if (Rt2 != Rt + 1)
4669      return Error(Operands[3]->getStartLoc(),
4670                   "destination operands must be sequential");
4671    return false;
4672  }
4673  case ARM::STRD: {
4674    // Rt2 must be Rt + 1.
4675    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4676    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4677    if (Rt2 != Rt + 1)
4678      return Error(Operands[3]->getStartLoc(),
4679                   "source operands must be sequential");
4680    return false;
4681  }
4682  case ARM::STRD_PRE:
4683  case ARM::STRD_POST:
4684  case ARM::STREXD: {
4685    // Rt2 must be Rt + 1.
4686    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4687    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
4688    if (Rt2 != Rt + 1)
4689      return Error(Operands[3]->getStartLoc(),
4690                   "source operands must be sequential");
4691    return false;
4692  }
4693  case ARM::SBFX:
4694  case ARM::UBFX: {
4695    // width must be in range [1, 32-lsb]
4696    unsigned lsb = Inst.getOperand(2).getImm();
4697    unsigned widthm1 = Inst.getOperand(3).getImm();
4698    if (widthm1 >= 32 - lsb)
4699      return Error(Operands[5]->getStartLoc(),
4700                   "bitfield width must be in range [1,32-lsb]");
4701    return false;
4702  }
4703  case ARM::tLDMIA: {
4704    // If we're parsing Thumb2, the .w variant is available and handles
4705    // most cases that are normally illegal for a Thumb1 LDM
4706    // instruction. We'll make the transformation in processInstruction()
4707    // if necessary.
4708    //
4709    // Thumb LDM instructions are writeback iff the base register is not
4710    // in the register list.
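    // For example, "ldm r0!, {r1, r2}" is the writeback form (r0 not in the
    // list), while "ldm r0, {r0, r1}" must not have the '!' since r0 is in
    // the list.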
4711    unsigned Rn = Inst.getOperand(0).getReg();
4712    bool hasWritebackToken =
4713      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
4714       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
4715    bool listContainsBase;
4716    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
4717      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
4718                   "registers must be in range r0-r7");
4719    // If we should have writeback, then there should be a '!' token.
4720    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
4721      return Error(Operands[2]->getStartLoc(),
4722                   "writeback operator '!' expected");
4723    // If we should not have writeback, there must not be a '!'. This is
4724    // true even for the 32-bit wide encodings.
4725    if (listContainsBase && hasWritebackToken)
4726      return Error(Operands[3]->getStartLoc(),
4727                   "writeback operator '!' not allowed when base register "
4728                   "in register list");
4729
4730    break;
4731  }
4732  case ARM::t2LDMIA_UPD: {
4733    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
4734      return Error(Operands[4]->getStartLoc(),
4735                   "writeback operator '!' not allowed when base register "
4736                   "in register list");
4737    break;
4738  }
4739  // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
4740  // so only issue a diagnostic for thumb1. The instructions will be
4741  // switched to the t2 encodings in processInstruction() if necessary.
4742  case ARM::tPOP: {
4743    bool listContainsBase;
4744    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
4745        !isThumbTwo())
4746      return Error(Operands[2]->getStartLoc(),
4747                   "registers must be in range r0-r7 or pc");
4748    break;
4749  }
4750  case ARM::tPUSH: {
4751    bool listContainsBase;
4752    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
4753        !isThumbTwo())
4754      return Error(Operands[2]->getStartLoc(),
4755                   "registers must be in range r0-r7 or lr");
4756    break;
4757  }
4758  case ARM::tSTMIA_UPD: {
4759    bool listContainsBase;
4760    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
4761      return Error(Operands[4]->getStartLoc(),
4762                   "registers must be in range r0-r7");
4763    break;
4764  }
4765  }
4766
4767  return false;
4768}
4769
4770static unsigned getRealVSTLNOpcode(unsigned Opc) {
4771  switch(Opc) {
4772  default: llvm_unreachable("unexpected opcode!");
4773  case ARM::VST1LNdWB_fixed_Asm_8:   return ARM::VST1LNd8_UPD;
4774  case ARM::VST1LNdWB_fixed_Asm_P8:  return ARM::VST1LNd8_UPD;
4775  case ARM::VST1LNdWB_fixed_Asm_I8:  return ARM::VST1LNd8_UPD;
4776  case ARM::VST1LNdWB_fixed_Asm_S8:  return ARM::VST1LNd8_UPD;
4777  case ARM::VST1LNdWB_fixed_Asm_U8:  return ARM::VST1LNd8_UPD;
4778  case ARM::VST1LNdWB_fixed_Asm_16:  return ARM::VST1LNd16_UPD;
4779  case ARM::VST1LNdWB_fixed_Asm_P16: return ARM::VST1LNd16_UPD;
4780  case ARM::VST1LNdWB_fixed_Asm_I16: return ARM::VST1LNd16_UPD;
4781  case ARM::VST1LNdWB_fixed_Asm_S16: return ARM::VST1LNd16_UPD;
4782  case ARM::VST1LNdWB_fixed_Asm_U16: return ARM::VST1LNd16_UPD;
4783  case ARM::VST1LNdWB_fixed_Asm_32:  return ARM::VST1LNd32_UPD;
4784  case ARM::VST1LNdWB_fixed_Asm_F:   return ARM::VST1LNd32_UPD;
4785  case ARM::VST1LNdWB_fixed_Asm_F32: return ARM::VST1LNd32_UPD;
4786  case ARM::VST1LNdWB_fixed_Asm_I32: return ARM::VST1LNd32_UPD;
4787  case ARM::VST1LNdWB_fixed_Asm_S32: return ARM::VST1LNd32_UPD;
4788  case ARM::VST1LNdWB_fixed_Asm_U32: return ARM::VST1LNd32_UPD;
4789  case ARM::VST1LNdWB_register_Asm_8:   return ARM::VST1LNd8_UPD;
4790  case ARM::VST1LNdWB_register_Asm_P8:  return ARM::VST1LNd8_UPD;
4791  case ARM::VST1LNdWB_register_Asm_I8:  return ARM::VST1LNd8_UPD;
4792  case ARM::VST1LNdWB_register_Asm_S8:  return ARM::VST1LNd8_UPD;
4793  case ARM::VST1LNdWB_register_Asm_U8:  return ARM::VST1LNd8_UPD;
4794  case ARM::VST1LNdWB_register_Asm_16:  return ARM::VST1LNd16_UPD;
4795  case ARM::VST1LNdWB_register_Asm_P16: return ARM::VST1LNd16_UPD;
4796  case ARM::VST1LNdWB_register_Asm_I16: return ARM::VST1LNd16_UPD;
4797  case ARM::VST1LNdWB_register_Asm_S16: return ARM::VST1LNd16_UPD;
4798  case ARM::VST1LNdWB_register_Asm_U16: return ARM::VST1LNd16_UPD;
4799  case ARM::VST1LNdWB_register_Asm_32:  return ARM::VST1LNd32_UPD;
4800  case ARM::VST1LNdWB_register_Asm_F:   return ARM::VST1LNd32_UPD;
4801  case ARM::VST1LNdWB_register_Asm_F32: return ARM::VST1LNd32_UPD;
4802  case ARM::VST1LNdWB_register_Asm_I32: return ARM::VST1LNd32_UPD;
4803  case ARM::VST1LNdWB_register_Asm_S32: return ARM::VST1LNd32_UPD;
4804  case ARM::VST1LNdWB_register_Asm_U32: return ARM::VST1LNd32_UPD;
4805  case ARM::VST1LNdAsm_8:   return ARM::VST1LNd8;
4806  case ARM::VST1LNdAsm_P8:  return ARM::VST1LNd8;
4807  case ARM::VST1LNdAsm_I8:  return ARM::VST1LNd8;
4808  case ARM::VST1LNdAsm_S8:  return ARM::VST1LNd8;
4809  case ARM::VST1LNdAsm_U8:  return ARM::VST1LNd8;
4810  case ARM::VST1LNdAsm_16:  return ARM::VST1LNd16;
4811  case ARM::VST1LNdAsm_P16: return ARM::VST1LNd16;
4812  case ARM::VST1LNdAsm_I16: return ARM::VST1LNd16;
4813  case ARM::VST1LNdAsm_S16: return ARM::VST1LNd16;
4814  case ARM::VST1LNdAsm_U16: return ARM::VST1LNd16;
4815  case ARM::VST1LNdAsm_32:  return ARM::VST1LNd32;
4816  case ARM::VST1LNdAsm_F:   return ARM::VST1LNd32;
4817  case ARM::VST1LNdAsm_F32: return ARM::VST1LNd32;
4818  case ARM::VST1LNdAsm_I32: return ARM::VST1LNd32;
4819  case ARM::VST1LNdAsm_S32: return ARM::VST1LNd32;
4820  case ARM::VST1LNdAsm_U32: return ARM::VST1LNd32;
4821  }
4822}
4823
4824static unsigned getRealVLDLNOpcode(unsigned Opc) {
4825  switch(Opc) {
4826  default: llvm_unreachable("unexpected opcode!");
4827  case ARM::VLD1LNdWB_fixed_Asm_8:   return ARM::VLD1LNd8_UPD;
4828  case ARM::VLD1LNdWB_fixed_Asm_P8:  return ARM::VLD1LNd8_UPD;
4829  case ARM::VLD1LNdWB_fixed_Asm_I8:  return ARM::VLD1LNd8_UPD;
4830  case ARM::VLD1LNdWB_fixed_Asm_S8:  return ARM::VLD1LNd8_UPD;
4831  case ARM::VLD1LNdWB_fixed_Asm_U8:  return ARM::VLD1LNd8_UPD;
4832  case ARM::VLD1LNdWB_fixed_Asm_16:  return ARM::VLD1LNd16_UPD;
4833  case ARM::VLD1LNdWB_fixed_Asm_P16: return ARM::VLD1LNd16_UPD;
4834  case ARM::VLD1LNdWB_fixed_Asm_I16: return ARM::VLD1LNd16_UPD;
4835  case ARM::VLD1LNdWB_fixed_Asm_S16: return ARM::VLD1LNd16_UPD;
4836  case ARM::VLD1LNdWB_fixed_Asm_U16: return ARM::VLD1LNd16_UPD;
4837  case ARM::VLD1LNdWB_fixed_Asm_32:  return ARM::VLD1LNd32_UPD;
4838  case ARM::VLD1LNdWB_fixed_Asm_F:   return ARM::VLD1LNd32_UPD;
4839  case ARM::VLD1LNdWB_fixed_Asm_F32: return ARM::VLD1LNd32_UPD;
4840  case ARM::VLD1LNdWB_fixed_Asm_I32: return ARM::VLD1LNd32_UPD;
4841  case ARM::VLD1LNdWB_fixed_Asm_S32: return ARM::VLD1LNd32_UPD;
4842  case ARM::VLD1LNdWB_fixed_Asm_U32: return ARM::VLD1LNd32_UPD;
4843  case ARM::VLD1LNdWB_register_Asm_8:   return ARM::VLD1LNd8_UPD;
4844  case ARM::VLD1LNdWB_register_Asm_P8:  return ARM::VLD1LNd8_UPD;
4845  case ARM::VLD1LNdWB_register_Asm_I8:  return ARM::VLD1LNd8_UPD;
4846  case ARM::VLD1LNdWB_register_Asm_S8:  return ARM::VLD1LNd8_UPD;
4847  case ARM::VLD1LNdWB_register_Asm_U8:  return ARM::VLD1LNd8_UPD;
4848  case ARM::VLD1LNdWB_register_Asm_16:  return ARM::VLD1LNd16_UPD;
4849  case ARM::VLD1LNdWB_register_Asm_P16: return ARM::VLD1LNd16_UPD;
4850  case ARM::VLD1LNdWB_register_Asm_I16: return ARM::VLD1LNd16_UPD;
4851  case ARM::VLD1LNdWB_register_Asm_S16: return ARM::VLD1LNd16_UPD;
4852  case ARM::VLD1LNdWB_register_Asm_U16: return ARM::VLD1LNd16_UPD;
4853  case ARM::VLD1LNdWB_register_Asm_32:  return ARM::VLD1LNd32_UPD;
4854  case ARM::VLD1LNdWB_register_Asm_F:   return ARM::VLD1LNd32_UPD;
4855  case ARM::VLD1LNdWB_register_Asm_F32: return ARM::VLD1LNd32_UPD;
4856  case ARM::VLD1LNdWB_register_Asm_I32: return ARM::VLD1LNd32_UPD;
4857  case ARM::VLD1LNdWB_register_Asm_S32: return ARM::VLD1LNd32_UPD;
4858  case ARM::VLD1LNdWB_register_Asm_U32: return ARM::VLD1LNd32_UPD;
4859  case ARM::VLD1LNdAsm_8:   return ARM::VLD1LNd8;
4860  case ARM::VLD1LNdAsm_P8:  return ARM::VLD1LNd8;
4861  case ARM::VLD1LNdAsm_I8:  return ARM::VLD1LNd8;
4862  case ARM::VLD1LNdAsm_S8:  return ARM::VLD1LNd8;
4863  case ARM::VLD1LNdAsm_U8:  return ARM::VLD1LNd8;
4864  case ARM::VLD1LNdAsm_16:  return ARM::VLD1LNd16;
4865  case ARM::VLD1LNdAsm_P16: return ARM::VLD1LNd16;
4866  case ARM::VLD1LNdAsm_I16: return ARM::VLD1LNd16;
4867  case ARM::VLD1LNdAsm_S16: return ARM::VLD1LNd16;
4868  case ARM::VLD1LNdAsm_U16: return ARM::VLD1LNd16;
4869  case ARM::VLD1LNdAsm_32:  return ARM::VLD1LNd32;
4870  case ARM::VLD1LNdAsm_F:   return ARM::VLD1LNd32;
4871  case ARM::VLD1LNdAsm_F32: return ARM::VLD1LNd32;
4872  case ARM::VLD1LNdAsm_I32: return ARM::VLD1LNd32;
4873  case ARM::VLD1LNdAsm_S32: return ARM::VLD1LNd32;
4874  case ARM::VLD1LNdAsm_U32: return ARM::VLD1LNd32;
4875  }
4876}
4877
4878bool ARMAsmParser::
4879processInstruction(MCInst &Inst,
4880                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4881  switch (Inst.getOpcode()) {
4882  // Handle NEON VST1 complex aliases.
4883  case ARM::VST1LNdWB_register_Asm_8:
4884  case ARM::VST1LNdWB_register_Asm_P8:
4885  case ARM::VST1LNdWB_register_Asm_I8:
4886  case ARM::VST1LNdWB_register_Asm_S8:
4887  case ARM::VST1LNdWB_register_Asm_U8:
4888  case ARM::VST1LNdWB_register_Asm_16:
4889  case ARM::VST1LNdWB_register_Asm_P16:
4890  case ARM::VST1LNdWB_register_Asm_I16:
4891  case ARM::VST1LNdWB_register_Asm_S16:
4892  case ARM::VST1LNdWB_register_Asm_U16:
4893  case ARM::VST1LNdWB_register_Asm_32:
4894  case ARM::VST1LNdWB_register_Asm_F:
4895  case ARM::VST1LNdWB_register_Asm_F32:
4896  case ARM::VST1LNdWB_register_Asm_I32:
4897  case ARM::VST1LNdWB_register_Asm_S32:
4898  case ARM::VST1LNdWB_register_Asm_U32: {
4899    MCInst TmpInst;
4900    // Shuffle the operands around so the lane index operand is in the
4901    // right place.
4902    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
4903    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
4904    TmpInst.addOperand(Inst.getOperand(2)); // Rn
4905    TmpInst.addOperand(Inst.getOperand(3)); // alignment
4906    TmpInst.addOperand(Inst.getOperand(4)); // Rm
4907    TmpInst.addOperand(Inst.getOperand(0)); // Vd
4908    TmpInst.addOperand(Inst.getOperand(1)); // lane
4909    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
4910    TmpInst.addOperand(Inst.getOperand(6));
4911    Inst = TmpInst;
4912    return true;
4913  }
4914  case ARM::VST1LNdWB_fixed_Asm_8:
4915  case ARM::VST1LNdWB_fixed_Asm_P8:
4916  case ARM::VST1LNdWB_fixed_Asm_I8:
4917  case ARM::VST1LNdWB_fixed_Asm_S8:
4918  case ARM::VST1LNdWB_fixed_Asm_U8:
4919  case ARM::VST1LNdWB_fixed_Asm_16:
4920  case ARM::VST1LNdWB_fixed_Asm_P16:
4921  case ARM::VST1LNdWB_fixed_Asm_I16:
4922  case ARM::VST1LNdWB_fixed_Asm_S16:
4923  case ARM::VST1LNdWB_fixed_Asm_U16:
4924  case ARM::VST1LNdWB_fixed_Asm_32:
4925  case ARM::VST1LNdWB_fixed_Asm_F:
4926  case ARM::VST1LNdWB_fixed_Asm_F32:
4927  case ARM::VST1LNdWB_fixed_Asm_I32:
4928  case ARM::VST1LNdWB_fixed_Asm_S32:
4929  case ARM::VST1LNdWB_fixed_Asm_U32: {
4930    MCInst TmpInst;
4931    // Shuffle the operands around so the lane index operand is in the
4932    // right place.
4933    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
4934    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
4935    TmpInst.addOperand(Inst.getOperand(2)); // Rn
4936    TmpInst.addOperand(Inst.getOperand(3)); // alignment
4937    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
4938    TmpInst.addOperand(Inst.getOperand(0)); // Vd
4939    TmpInst.addOperand(Inst.getOperand(1)); // lane
4940    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
4941    TmpInst.addOperand(Inst.getOperand(5));
4942    Inst = TmpInst;
4943    return true;
4944  }
4945  case ARM::VST1LNdAsm_8:
4946  case ARM::VST1LNdAsm_P8:
4947  case ARM::VST1LNdAsm_I8:
4948  case ARM::VST1LNdAsm_S8:
4949  case ARM::VST1LNdAsm_U8:
4950  case ARM::VST1LNdAsm_16:
4951  case ARM::VST1LNdAsm_P16:
4952  case ARM::VST1LNdAsm_I16:
4953  case ARM::VST1LNdAsm_S16:
4954  case ARM::VST1LNdAsm_U16:
4955  case ARM::VST1LNdAsm_32:
4956  case ARM::VST1LNdAsm_F:
4957  case ARM::VST1LNdAsm_F32:
4958  case ARM::VST1LNdAsm_I32:
4959  case ARM::VST1LNdAsm_S32:
4960  case ARM::VST1LNdAsm_U32: {
4961    MCInst TmpInst;
4962    // Shuffle the operands around so the lane index operand is in the
4963    // right place.
4964    TmpInst.setOpcode(getRealVSTLNOpcode(Inst.getOpcode()));
4965    TmpInst.addOperand(Inst.getOperand(2)); // Rn
4966    TmpInst.addOperand(Inst.getOperand(3)); // alignment
4967    TmpInst.addOperand(Inst.getOperand(0)); // Vd
4968    TmpInst.addOperand(Inst.getOperand(1)); // lane
4969    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
4970    TmpInst.addOperand(Inst.getOperand(5));
4971    Inst = TmpInst;
4972    return true;
4973  }
4974  // Handle NEON VLD1 complex aliases.
4975  case ARM::VLD1LNdWB_register_Asm_8:
4976  case ARM::VLD1LNdWB_register_Asm_P8:
4977  case ARM::VLD1LNdWB_register_Asm_I8:
4978  case ARM::VLD1LNdWB_register_Asm_S8:
4979  case ARM::VLD1LNdWB_register_Asm_U8:
4980  case ARM::VLD1LNdWB_register_Asm_16:
4981  case ARM::VLD1LNdWB_register_Asm_P16:
4982  case ARM::VLD1LNdWB_register_Asm_I16:
4983  case ARM::VLD1LNdWB_register_Asm_S16:
4984  case ARM::VLD1LNdWB_register_Asm_U16:
4985  case ARM::VLD1LNdWB_register_Asm_32:
4986  case ARM::VLD1LNdWB_register_Asm_F:
4987  case ARM::VLD1LNdWB_register_Asm_F32:
4988  case ARM::VLD1LNdWB_register_Asm_I32:
4989  case ARM::VLD1LNdWB_register_Asm_S32:
4990  case ARM::VLD1LNdWB_register_Asm_U32: {
4991    MCInst TmpInst;
4992    // Shuffle the operands around so the lane index operand is in the
4993    // right place.
4994    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
4995    TmpInst.addOperand(Inst.getOperand(0)); // Vd
4996    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
4997    TmpInst.addOperand(Inst.getOperand(2)); // Rn
4998    TmpInst.addOperand(Inst.getOperand(3)); // alignment
4999    TmpInst.addOperand(Inst.getOperand(4)); // Rm
5000    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5001    TmpInst.addOperand(Inst.getOperand(1)); // lane
5002    TmpInst.addOperand(Inst.getOperand(5)); // CondCode
5003    TmpInst.addOperand(Inst.getOperand(6));
5004    Inst = TmpInst;
5005    return true;
5006  }
5007  case ARM::VLD1LNdWB_fixed_Asm_8:
5008  case ARM::VLD1LNdWB_fixed_Asm_P8:
5009  case ARM::VLD1LNdWB_fixed_Asm_I8:
5010  case ARM::VLD1LNdWB_fixed_Asm_S8:
5011  case ARM::VLD1LNdWB_fixed_Asm_U8:
5012  case ARM::VLD1LNdWB_fixed_Asm_16:
5013  case ARM::VLD1LNdWB_fixed_Asm_P16:
5014  case ARM::VLD1LNdWB_fixed_Asm_I16:
5015  case ARM::VLD1LNdWB_fixed_Asm_S16:
5016  case ARM::VLD1LNdWB_fixed_Asm_U16:
5017  case ARM::VLD1LNdWB_fixed_Asm_32:
5018  case ARM::VLD1LNdWB_fixed_Asm_F:
5019  case ARM::VLD1LNdWB_fixed_Asm_F32:
5020  case ARM::VLD1LNdWB_fixed_Asm_I32:
5021  case ARM::VLD1LNdWB_fixed_Asm_S32:
5022  case ARM::VLD1LNdWB_fixed_Asm_U32: {
5023    MCInst TmpInst;
5024    // Shuffle the operands around so the lane index operand is in the
5025    // right place.
5026    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5027    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5028    TmpInst.addOperand(Inst.getOperand(2)); // Rn_wb
5029    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5030    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5031    TmpInst.addOperand(MCOperand::CreateReg(0)); // Rm
5032    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5033    TmpInst.addOperand(Inst.getOperand(1)); // lane
5034    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5035    TmpInst.addOperand(Inst.getOperand(5));
5036    Inst = TmpInst;
5037    return true;
5038  }
5039  case ARM::VLD1LNdAsm_8:
5040  case ARM::VLD1LNdAsm_P8:
5041  case ARM::VLD1LNdAsm_I8:
5042  case ARM::VLD1LNdAsm_S8:
5043  case ARM::VLD1LNdAsm_U8:
5044  case ARM::VLD1LNdAsm_16:
5045  case ARM::VLD1LNdAsm_P16:
5046  case ARM::VLD1LNdAsm_I16:
5047  case ARM::VLD1LNdAsm_S16:
5048  case ARM::VLD1LNdAsm_U16:
5049  case ARM::VLD1LNdAsm_32:
5050  case ARM::VLD1LNdAsm_F:
5051  case ARM::VLD1LNdAsm_F32:
5052  case ARM::VLD1LNdAsm_I32:
5053  case ARM::VLD1LNdAsm_S32:
5054  case ARM::VLD1LNdAsm_U32: {
5055    MCInst TmpInst;
5056    // Shuffle the operands around so the lane index operand is in the
5057    // right place.
5058    TmpInst.setOpcode(getRealVLDLNOpcode(Inst.getOpcode()));
5059    TmpInst.addOperand(Inst.getOperand(0)); // Vd
5060    TmpInst.addOperand(Inst.getOperand(2)); // Rn
5061    TmpInst.addOperand(Inst.getOperand(3)); // alignment
5062    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
5063    TmpInst.addOperand(Inst.getOperand(1)); // lane
5064    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
5065    TmpInst.addOperand(Inst.getOperand(5));
5066    Inst = TmpInst;
5067    return true;
5068  }
5069  // Handle the MOV complex aliases.
5070  case ARM::ASRr:
5071  case ARM::LSRr:
5072  case ARM::LSLr:
5073  case ARM::RORr: {
5074    ARM_AM::ShiftOpc ShiftTy;
5075    switch(Inst.getOpcode()) {
5076    default: llvm_unreachable("unexpected opcode!");
5077    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
5078    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
5079    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
5080    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
5081    }
5082    // Only the shift type matters here; the amount comes from a register.
5083    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
5084    MCInst TmpInst;
5085    TmpInst.setOpcode(ARM::MOVsr);
5086    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5087    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5088    TmpInst.addOperand(Inst.getOperand(2)); // Rm
5089    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5090    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5091    TmpInst.addOperand(Inst.getOperand(4));
5092    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5093    Inst = TmpInst;
5094    return true;
5095  }
5096  case ARM::ASRi:
5097  case ARM::LSRi:
5098  case ARM::LSLi:
5099  case ARM::RORi: {
5100    ARM_AM::ShiftOpc ShiftTy;
5101    switch(Inst.getOpcode()) {
5102    default: llvm_unreachable("unexpected opcode!");
5103    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
5104    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
5105    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
5106    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
5107    }
5108    // A shift by zero is a plain MOVr, not a MOVsi.
5109    unsigned Amt = Inst.getOperand(2).getImm();
5110    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
5111    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
5112    MCInst TmpInst;
5113    TmpInst.setOpcode(Opc);
5114    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5115    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5116    if (Opc == ARM::MOVsi)
5117      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5118    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
5119    TmpInst.addOperand(Inst.getOperand(4));
5120    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
5121    Inst = TmpInst;
5122    return true;
5123  }
5124  case ARM::RRXi: {
5125    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
5126    MCInst TmpInst;
5127    TmpInst.setOpcode(ARM::MOVsi);
5128    TmpInst.addOperand(Inst.getOperand(0)); // Rd
5129    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5130    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
5131    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5132    TmpInst.addOperand(Inst.getOperand(3));
5133    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
5134    Inst = TmpInst;
5135    return true;
5136  }
5137  case ARM::t2LDMIA_UPD: {
5138    // If this is a load of a single register, then we should use
5139    // a post-indexed LDR instruction instead, per the ARM ARM.
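    // For example, a single-register "ldmia r0!, {r1}" is emitted as the
    // equivalent post-indexed "ldr r1, [r0], #4".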
5140    if (Inst.getNumOperands() != 5)
5141      return false;
5142    MCInst TmpInst;
5143    TmpInst.setOpcode(ARM::t2LDR_POST);
5144    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5145    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5146    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5147    TmpInst.addOperand(MCOperand::CreateImm(4));
5148    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5149    TmpInst.addOperand(Inst.getOperand(3));
5150    Inst = TmpInst;
5151    return true;
5152  }
5153  case ARM::t2STMDB_UPD: {
5154    // If this is a store of a single register, then we should use
5155    // a pre-indexed STR instruction instead, per the ARM ARM.
5156    if (Inst.getNumOperands() != 5)
5157      return false;
5158    MCInst TmpInst;
5159    TmpInst.setOpcode(ARM::t2STR_PRE);
5160    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5161    TmpInst.addOperand(Inst.getOperand(4)); // Rt
5162    TmpInst.addOperand(Inst.getOperand(1)); // Rn
5163    TmpInst.addOperand(MCOperand::CreateImm(-4));
5164    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5165    TmpInst.addOperand(Inst.getOperand(3));
5166    Inst = TmpInst;
5167    return true;
5168  }
5169  case ARM::LDMIA_UPD:
5170    // If this is a load of a single register via a 'pop', then we should use
5171    // a post-indexed LDR instruction instead, per the ARM ARM.
5172    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
5173        Inst.getNumOperands() == 5) {
5174      MCInst TmpInst;
5175      TmpInst.setOpcode(ARM::LDR_POST_IMM);
5176      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5177      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5178      TmpInst.addOperand(Inst.getOperand(1)); // Rn
5179      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
5180      TmpInst.addOperand(MCOperand::CreateImm(4));
5181      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5182      TmpInst.addOperand(Inst.getOperand(3));
5183      Inst = TmpInst;
5184      return true;
5185    }
5186    break;
5187  case ARM::STMDB_UPD:
5188    // If this is a store of a single register via a 'push', then we should use
5189    // a pre-indexed STR instruction instead, per the ARM ARM.
5190    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
5191        Inst.getNumOperands() == 5) {
5192      MCInst TmpInst;
5193      TmpInst.setOpcode(ARM::STR_PRE_IMM);
5194      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
5195      TmpInst.addOperand(Inst.getOperand(4)); // Rt
5196      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
5197      TmpInst.addOperand(MCOperand::CreateImm(-4));
5198      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
5199      TmpInst.addOperand(Inst.getOperand(3));
5200      Inst = TmpInst;
5201    }
5202    break;
5203  case ARM::t2ADDri12:
5204    // If the immediate fits for encoding T3 (t2ADDri) and the generic "add"
5205    // mnemonic was used (not "addw"), encoding T3 is preferred.
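    // For example, "add r0, r1, #0x100" (a valid modified immediate) is
    // switched to t2ADDri here, while "addw r0, r1, #0x100" stays as T4.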
5206    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "add" ||
5207        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5208      break;
5209    Inst.setOpcode(ARM::t2ADDri);
5210    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5211    break;
5212  case ARM::t2SUBri12:
5213    // If the immediate fits for encoding T3 (t2SUBri) and the generic "sub"
5214    // mnemonic was used (not "subw"), encoding T3 is preferred.
5215    if (static_cast<ARMOperand*>(Operands[0])->getToken() != "sub" ||
5216        ARM_AM::getT2SOImmVal(Inst.getOperand(2).getImm()) == -1)
5217      break;
5218    Inst.setOpcode(ARM::t2SUBri);
5219    Inst.addOperand(MCOperand::CreateReg(0)); // cc_out
5220    break;
5221  case ARM::tADDi8:
5222    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
5223    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5224    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5225    // to encoding T1 if <Rd> is omitted."
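    // For example, "adds r1, r1, #3" (Rd written explicitly) is switched to
    // tADDi3, while "adds r1, #3" stays as tADDi8.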
5226    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5227      Inst.setOpcode(ARM::tADDi3);
5228      return true;
5229    }
5230    break;
5231  case ARM::tSUBi8:
5232    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
5233    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
5234    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
5235    // to encoding T1 if <Rd> is omitted."
5236    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
5237      Inst.setOpcode(ARM::tSUBi3);
5238      return true;
5239    }
5240    break;
5241  case ARM::t2ADDrr: {
5242    // If the destination and first source operand are the same, and
5243    // there's no setting of the flags, use encoding T2 instead of T3.
5244    // Note that this is only for ADD, not SUB. This mirrors the system
5245    // 'as' behaviour. Make sure the wide encoding wasn't explicit.
5246    if (Inst.getOperand(0).getReg() != Inst.getOperand(1).getReg() ||
5247        Inst.getOperand(5).getReg() != 0 ||
5248        (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5249         static_cast<ARMOperand*>(Operands[3])->getToken() == ".w"))
5250      break;
5251    MCInst TmpInst;
5252    TmpInst.setOpcode(ARM::tADDhirr);
5253    TmpInst.addOperand(Inst.getOperand(0));
5254    TmpInst.addOperand(Inst.getOperand(0));
5255    TmpInst.addOperand(Inst.getOperand(2));
5256    TmpInst.addOperand(Inst.getOperand(3));
5257    TmpInst.addOperand(Inst.getOperand(4));
5258    Inst = TmpInst;
5259    return true;
5260  }
5261  case ARM::tB:
5262    // A Thumb conditional branch outside of an IT block is a tBcc.
5263    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
5264      Inst.setOpcode(ARM::tBcc);
5265      return true;
5266    }
5267    break;
5268  case ARM::t2B:
5269    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
5270    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
5271      Inst.setOpcode(ARM::t2Bcc);
5272      return true;
5273    }
5274    break;
5275  case ARM::t2Bcc:
5276    // If the conditional is AL or we're in an IT block, we really want t2B.
5277    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
5278      Inst.setOpcode(ARM::t2B);
5279      return true;
5280    }
5281    break;
5282  case ARM::tBcc:
5283    // If the conditional is AL, we really want tB.
5284    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
5285      Inst.setOpcode(ARM::tB);
5286      return true;
5287    }
5288    break;
5289  case ARM::tLDMIA: {
5290    // If the register list contains any high registers, or if the writeback
5291    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
5292    // instead if we're in Thumb2. Otherwise, this should have generated
5293    // an error in validateInstruction().
5294    unsigned Rn = Inst.getOperand(0).getReg();
5295    bool hasWritebackToken =
5296      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
5297       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
5298    bool listContainsBase;
5299    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
5300        (!listContainsBase && !hasWritebackToken) ||
5301        (listContainsBase && hasWritebackToken)) {
5302      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5303      assert (isThumbTwo());
5304      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
5305      // If we're switching to the updating version, we need to insert
5306      // the writeback tied operand.
5307      if (hasWritebackToken)
5308        Inst.insert(Inst.begin(),
5309                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
5310      return true;
5311    }
5312    break;
5313  }
5314  case ARM::tSTMIA_UPD: {
5315    // If the register list contains any high registers, we need to use
5316    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5317    // should have generated an error in validateInstruction().
5318    unsigned Rn = Inst.getOperand(0).getReg();
5319    bool listContainsBase;
5320    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
5321      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
5322      assert (isThumbTwo());
5323      Inst.setOpcode(ARM::t2STMIA_UPD);
5324      return true;
5325    }
5326    break;
5327  }
5328  case ARM::tPOP: {
5329    bool listContainsBase;
5330    // If the register list contains any high registers, we need to use
5331    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5332    // should have generated an error in validateInstruction().
5333    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
5334      return false;
5335    assert (isThumbTwo());
5336    Inst.setOpcode(ARM::t2LDMIA_UPD);
5337    // Add the base register and writeback operands.
5338    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5339    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5340    return true;
5341  }
5342  case ARM::tPUSH: {
5343    bool listContainsBase;
5344    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
5345      return false;
5346    assert (isThumbTwo());
5347    Inst.setOpcode(ARM::t2STMDB_UPD);
5348    // Add the base register and writeback operands.
5349    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5350    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5351    return true;
5352  }
5353  case ARM::t2MOVi: {
5354    // If we can use the 16-bit encoding and the user didn't explicitly
5355    // request the 32-bit variant, transform it here.
5356    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5357        Inst.getOperand(1).getImm() <= 255 &&
5358        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
5359         Inst.getOperand(4).getReg() == ARM::CPSR) ||
5360        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
5361        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5362         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5363      // The operands aren't in the same order for tMOVi8...
5364      MCInst TmpInst;
5365      TmpInst.setOpcode(ARM::tMOVi8);
5366      TmpInst.addOperand(Inst.getOperand(0));
5367      TmpInst.addOperand(Inst.getOperand(4));
5368      TmpInst.addOperand(Inst.getOperand(1));
5369      TmpInst.addOperand(Inst.getOperand(2));
5370      TmpInst.addOperand(Inst.getOperand(3));
5371      Inst = TmpInst;
5372      return true;
5373    }
5374    break;
5375  }
5376  case ARM::t2MOVr: {
5377    // If we can use the 16-bit encoding and the user didn't explicitly
5378    // request the 32-bit variant, transform it here.
5379    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5380        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5381        Inst.getOperand(2).getImm() == ARMCC::AL &&
5382        Inst.getOperand(4).getReg() == ARM::CPSR &&
5383        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5384         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5385      // The operands aren't the same for tMOV[S]r... (no cc_out)
5386      MCInst TmpInst;
5387      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
5388      TmpInst.addOperand(Inst.getOperand(0));
5389      TmpInst.addOperand(Inst.getOperand(1));
5390      TmpInst.addOperand(Inst.getOperand(2));
5391      TmpInst.addOperand(Inst.getOperand(3));
5392      Inst = TmpInst;
5393      return true;
5394    }
5395    break;
5396  }
5397  case ARM::t2SXTH:
5398  case ARM::t2SXTB:
5399  case ARM::t2UXTH:
5400  case ARM::t2UXTB: {
5401    // If we can use the 16-bit encoding and the user didn't explicitly
5402    // request the 32-bit variant, transform it here.
5403    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5404        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5405        Inst.getOperand(2).getImm() == 0 &&
5406        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5407         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5408      unsigned NewOpc;
5409      switch (Inst.getOpcode()) {
5410      default: llvm_unreachable("Illegal opcode!");
5411      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
5412      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
5413      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
5414      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
5415      }
5416      // The operands aren't the same for thumb1 (no rotate operand).
5417      MCInst TmpInst;
5418      TmpInst.setOpcode(NewOpc);
5419      TmpInst.addOperand(Inst.getOperand(0));
5420      TmpInst.addOperand(Inst.getOperand(1));
5421      TmpInst.addOperand(Inst.getOperand(3));
5422      TmpInst.addOperand(Inst.getOperand(4));
5423      Inst = TmpInst;
5424      return true;
5425    }
5426    break;
5427  }
5428  case ARM::t2IT: {
5429    // In the encoded mask, the bit for each condition after the first means
5430    // 't' (same condition as the first) when it equals the low bit of the
5431    // condition code. The parsed mask always uses 1 for 't', so XOR-toggle
5432    // the bits if the low bit of the condition code is zero. The encoding
5433    // also expects the low bit of the condition to be encoded as bit 4 of
5434    // the mask operand, so OR that in if needed.
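    // For example, "itte eq" arrives here with Mask == 0b1010; EQ has a zero
    // low bit, so the bits above the terminator are toggled, giving 0b0110.
    // "itte ne" instead gets bit 4 ORed in, giving 0b11010.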
5435    MCOperand &MO = Inst.getOperand(1);
5436    unsigned Mask = MO.getImm();
5437    unsigned OrigMask = Mask;
5438    unsigned TZ = CountTrailingZeros_32(Mask);
5439    if ((Inst.getOperand(0).getImm() & 1) == 0) {
5440      assert(Mask && TZ <= 3 && "illegal IT mask value!");
5441      for (unsigned i = 3; i != TZ; --i)
5442        Mask ^= 1 << i;
5443    } else
5444      Mask |= 0x10;
5445    MO.setImm(Mask);
5446
5447    // Set up the IT block state according to the IT instruction we just
5448    // matched.
5449    assert(!inITBlock() && "nested IT blocks?!");
5450    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
5451    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
5452    ITState.CurPosition = 0;
5453    ITState.FirstCond = true;
5454    break;
5455  }
5456  }
5457  return false;
5458}
5459
5460unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
5461  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
5462  // suffix depending on whether they're in an IT block or not.
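  // For example, the 16-bit "add r0, r1, r2" encoding is the flag-setting
  // "adds" outside an IT block and non-flag-setting inside one, so the form
  // as written must agree with the current IT state.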
5463  unsigned Opc = Inst.getOpcode();
5464  const MCInstrDesc &MCID = getInstDesc(Opc);
5465  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
5466    assert(MCID.hasOptionalDef() &&
5467           "optionally flag setting instruction missing optional def operand");
5468    assert(MCID.NumOperands == Inst.getNumOperands() &&
5469           "operand count mismatch!");
5470    // Find the optional-def operand (cc_out).
5471    unsigned OpNo;
5472    for (OpNo = 0;
5473         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
5474         ++OpNo)
5475      ;
5476    // If we're parsing Thumb1, reject it completely.
5477    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
5478      return Match_MnemonicFail;
5479    // If we're parsing Thumb2, which form is legal depends on whether we're
5480    // in an IT block.
5481    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
5482        !inITBlock())
5483      return Match_RequiresITBlock;
5484    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
5485        inITBlock())
5486      return Match_RequiresNotITBlock;
5487  }
5488  // Some high-register supporting Thumb1 encodings only allow both registers
5489  // to be from r0-r7 when in Thumb2.
5490  else if (Opc == ARM::tADDhirr && isThumbOne() &&
5491           isARMLowRegister(Inst.getOperand(1).getReg()) &&
5492           isARMLowRegister(Inst.getOperand(2).getReg()))
5493    return Match_RequiresThumb2;
5494  // Others only require ARMv6 or later.
5495  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
5496           isARMLowRegister(Inst.getOperand(0).getReg()) &&
5497           isARMLowRegister(Inst.getOperand(1).getReg()))
5498    return Match_RequiresV6;
5499  return Match_Success;
5500}
5501
5502bool ARMAsmParser::
5503MatchAndEmitInstruction(SMLoc IDLoc,
5504                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
5505                        MCStreamer &Out) {
5506  MCInst Inst;
5507  unsigned ErrorInfo;
5508  unsigned MatchResult;
5509  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
5510  switch (MatchResult) {
5511  default: break;
5512  case Match_Success:
5513    // Context sensitive operand constraints aren't handled by the matcher,
5514    // so check them here.
5515    if (validateInstruction(Inst, Operands)) {
5516      // Still progress the IT block, otherwise one wrong condition causes
5517      // nasty cascading errors.
5518      forwardITPosition();
5519      return true;
5520    }
5521
5522    // Some instructions need post-processing to, for example, tweak which
5523    // encoding is selected. Loop on it while changes happen so the
5524    // individual transformations can chain off each other. E.g.,
5525    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(r8,sp)
    while (processInstruction(Inst, Operands))
      ;

    // Only move forward at the very end so that everything in validate
    // and process gets a consistent answer about whether we're in an IT
    // block.
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// ParseDirective parses the ARM-specific directives.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  return true;
}

/// parseDirectiveWord
///  ::= .word [ expression (, expression)* ]
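/// Illustrative example (label name is arbitrary):
///   .word 0x12345678, some_label+4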
bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    for (;;) {
      const MCExpr *Value;
      if (getParser().ParseExpression(Value))
        return true;

      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);

      if (getLexer().is(AsmToken::EndOfStatement))
        break;

      // FIXME: Improve diagnostic.
      if (getLexer().isNot(AsmToken::Comma))
        return Error(L, "unexpected token in directive");
      Parser.Lex();
    }
  }

  Parser.Lex();
  return false;
}

/// parseDirectiveThumb
///  ::= .thumb
bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // TODO: set thumb mode
  // TODO: tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveThumbFunc
///  ::= .thumb_func symbol_name
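/// Illustrative examples (symbol names are arbitrary):
///   Darwin (Mach-O):  .thumb_func _foo
///   ELF:              .thumb_func
///                     foo: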
bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
  bool isMachO = MAI.hasSubsectionsViaSymbols();
  StringRef Name;

  // Darwin asm has the function name after the .thumb_func directive;
  // ELF doesn't.
  if (isMachO) {
    const AsmToken &Tok = Parser.getTok();
    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
      return Error(L, "unexpected token in .thumb_func directive");
    Name = Tok.getIdentifier();
    Parser.Lex(); // Consume the identifier token.
  }

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(L, "unexpected token in directive");
  Parser.Lex();

  // FIXME: assuming function name will be the line following .thumb_func
  if (!isMachO) {
    Name = Parser.getTok().getIdentifier();
  }

  // Mark symbol as a thumb symbol.
  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
  getParser().getStreamer().EmitThumbFunc(Func);
  return false;
}

/// parseDirectiveSyntax
///  ::= .syntax unified | divided
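/// Illustrative example:
///   .syntax unified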
bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return Error(L, "unexpected token in .syntax directive");
  StringRef Mode = Tok.getString();
  if (Mode == "unified" || Mode == "UNIFIED")
    Parser.Lex();
  else if (Mode == "divided" || Mode == "DIVIDED")
    return Error(L, "'.syntax divided' arm assembly not supported");
  else
    return Error(L, "unrecognized syntax mode in .syntax directive");

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
  Parser.Lex();

  // TODO: tell the MC streamer the mode
  // getParser().getStreamer().Emit???();
  return false;
}

/// parseDirectiveCode
///  ::= .code 16 | 32
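/// Illustrative examples:
///   .code 16    @ assemble subsequent instructions as Thumb
///   .code 32    @ assemble subsequent instructions as ARM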
bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Integer))
    return Error(L, "unexpected token in .code directive");
  int64_t Val = Parser.getTok().getIntVal();
  if (Val == 16)
    Parser.Lex();
  else if (Val == 32)
    Parser.Lex();
  else
    return Error(L, "invalid operand to .code directive");

  if (getLexer().isNot(AsmToken::EndOfStatement))
    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
  Parser.Lex();

  if (Val == 16) {
    if (!isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
  } else {
    if (isThumb())
      SwitchMode();
    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
  }

  return false;
}

extern "C" void LLVMInitializeARMAsmLexer();

/// Force static initialization.
extern "C" void LLVMInitializeARMAsmParser() {
  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
  LLVMInitializeARMAsmLexer();
}

#define GET_REGISTER_MATCHER
#define GET_MATCHER_IMPLEMENTATION
#include "ARMGenAsmMatcher.inc"