ARMAsmParser.cpp revision 7636bf6530fd83bf7356ae3894246a4e558741a4
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9
10#include "MCTargetDesc/ARMBaseInfo.h"
11#include "MCTargetDesc/ARMAddressingModes.h"
12#include "MCTargetDesc/ARMMCExpr.h"
13#include "llvm/MC/MCParser/MCAsmLexer.h"
14#include "llvm/MC/MCParser/MCAsmParser.h"
15#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
16#include "llvm/MC/MCAsmInfo.h"
17#include "llvm/MC/MCContext.h"
18#include "llvm/MC/MCStreamer.h"
19#include "llvm/MC/MCExpr.h"
20#include "llvm/MC/MCInst.h"
21#include "llvm/MC/MCInstrDesc.h"
22#include "llvm/MC/MCRegisterInfo.h"
23#include "llvm/MC/MCSubtargetInfo.h"
24#include "llvm/MC/MCTargetAsmParser.h"
25#include "llvm/Support/MathExtras.h"
26#include "llvm/Support/SourceMgr.h"
27#include "llvm/Support/TargetRegistry.h"
28#include "llvm/Support/raw_ostream.h"
29#include "llvm/ADT/BitVector.h"
30#include "llvm/ADT/OwningPtr.h"
31#include "llvm/ADT/STLExtras.h"
32#include "llvm/ADT/SmallVector.h"
33#include "llvm/ADT/StringSwitch.h"
34#include "llvm/ADT/Twine.h"
35
36using namespace llvm;
37
38namespace {
39
40class ARMOperand;
41
42enum VectorLaneTy { NoLanes, AllLanes, IndexedLane };
43
44class ARMAsmParser : public MCTargetAsmParser {
45  MCSubtargetInfo &STI;
46  MCAsmParser &Parser;
47
48  struct {
49    ARMCC::CondCodes Cond;    // Condition for IT block.
50    unsigned Mask:4;          // Condition mask for instructions.
51                              // Starting at first 1 (from lsb).
52                              //   '1'  condition as indicated in IT.
53                              //   '0'  inverse of condition (else).
54                              // Count of instructions in IT block is
55                              // 4 - trailingzeroes(mask)
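                              // Worked example: a bare "IT" (one
                              // conditional instruction) uses Mask =
                              // 0b1000, so trailingzeroes(mask) is 3 and
                              // the count is 4 - 3 = 1; a full four-
                              // instruction block has its terminating 1
                              // in bit 0, giving a count of 4 - 0 = 4.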
56
57    bool FirstCond;           // Explicit flag for when we're parsing the
58                              // first instruction in the IT block. It's
59                              // implied in the mask, so needs special
60                              // handling.
61
62    unsigned CurPosition;     // Current position in parsing of IT
63                              // block. In range [0,3]. Initialized
64                              // according to count of instructions in block.
65                              // ~0U if no active IT block.
66  } ITState;
67  bool inITBlock() { return ITState.CurPosition != ~0U;}
68  void forwardITPosition() {
69    if (!inITBlock()) return;
70    // Move to the next instruction in the IT block, if there is one. If not,
71    // mark the block as done.
72    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
73    if (++ITState.CurPosition == 5 - TZ)
74      ITState.CurPosition = ~0U; // Done with the IT block after this.
75  }
76
77
78  MCAsmParser &getParser() const { return Parser; }
79  MCAsmLexer &getLexer() const { return Parser.getLexer(); }
80
81  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
82  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }
83
84  int tryParseRegister();
85  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
86  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
87  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
88  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
89  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
90  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
91  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
92                              unsigned &ShiftAmount);
93  bool parseDirectiveWord(unsigned Size, SMLoc L);
94  bool parseDirectiveThumb(SMLoc L);
95  bool parseDirectiveThumbFunc(SMLoc L);
96  bool parseDirectiveCode(SMLoc L);
97  bool parseDirectiveSyntax(SMLoc L);
98
99  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
100                          bool &CarrySetting, unsigned &ProcessorIMod,
101                          StringRef &ITMask);
102  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
103                             bool &CanAcceptPredicationCode);
104
105  bool isThumb() const {
106    // FIXME: Can tablegen auto-generate this?
107    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
108  }
109  bool isThumbOne() const {
110    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
111  }
112  bool isThumbTwo() const {
113    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
114  }
115  bool hasV6Ops() const {
116    return STI.getFeatureBits() & ARM::HasV6Ops;
117  }
118  bool hasV7Ops() const {
119    return STI.getFeatureBits() & ARM::HasV7Ops;
120  }
121  void SwitchMode() {
122    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
123    setAvailableFeatures(FB);
124  }
125  bool isMClass() const {
126    return STI.getFeatureBits() & ARM::FeatureMClass;
127  }
128
129  /// @name Auto-generated Match Functions
130  /// {
131
132#define GET_ASSEMBLER_HEADER
133#include "ARMGenAsmMatcher.inc"
134
135  /// }
136
137  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
138  OperandMatchResultTy parseCoprocNumOperand(
139    SmallVectorImpl<MCParsedAsmOperand*>&);
140  OperandMatchResultTy parseCoprocRegOperand(
141    SmallVectorImpl<MCParsedAsmOperand*>&);
142  OperandMatchResultTy parseCoprocOptionOperand(
143    SmallVectorImpl<MCParsedAsmOperand*>&);
144  OperandMatchResultTy parseMemBarrierOptOperand(
145    SmallVectorImpl<MCParsedAsmOperand*>&);
146  OperandMatchResultTy parseProcIFlagsOperand(
147    SmallVectorImpl<MCParsedAsmOperand*>&);
148  OperandMatchResultTy parseMSRMaskOperand(
149    SmallVectorImpl<MCParsedAsmOperand*>&);
150  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
151                                   StringRef Op, int Low, int High);
152  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
153    return parsePKHImm(O, "lsl", 0, 31);
154  }
155  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
156    return parsePKHImm(O, "asr", 1, 32);
157  }
158  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
159  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
160  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
161  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
162  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
163  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
164  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
165  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);
166  OperandMatchResultTy parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index);
167
168  // Asm Match Converter Methods
169  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
170                    const SmallVectorImpl<MCParsedAsmOperand*> &);
171  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
172                    const SmallVectorImpl<MCParsedAsmOperand*> &);
173  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
174                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
175  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
176                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
177  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
178                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
179  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
180                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
181  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
182                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
183  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
184                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
185  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
186                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
187  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
188                             const SmallVectorImpl<MCParsedAsmOperand*> &);
189  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
190                             const SmallVectorImpl<MCParsedAsmOperand*> &);
191  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
192                             const SmallVectorImpl<MCParsedAsmOperand*> &);
193  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
194                             const SmallVectorImpl<MCParsedAsmOperand*> &);
195  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
196                  const SmallVectorImpl<MCParsedAsmOperand*> &);
197  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
198                  const SmallVectorImpl<MCParsedAsmOperand*> &);
199  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
200                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
201  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
202                        const SmallVectorImpl<MCParsedAsmOperand*> &);
203  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
204                     const SmallVectorImpl<MCParsedAsmOperand*> &);
205  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
206                        const SmallVectorImpl<MCParsedAsmOperand*> &);
207  bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
208                     const SmallVectorImpl<MCParsedAsmOperand*> &);
209  bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
210                        const SmallVectorImpl<MCParsedAsmOperand*> &);
211
212  bool validateInstruction(MCInst &Inst,
213                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
214  bool processInstruction(MCInst &Inst,
215                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
216  bool shouldOmitCCOutOperand(StringRef Mnemonic,
217                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);
218
219public:
220  enum ARMMatchResultTy {
221    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
222    Match_RequiresNotITBlock,
223    Match_RequiresV6,
224    Match_RequiresThumb2
225  };
226
227  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
228    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
229    MCAsmParserExtension::Initialize(_Parser);
230
231    // Initialize the set of available features.
232    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));
233
234    // Not in an ITBlock to start with.
235    ITState.CurPosition = ~0U;
236  }
237
238  // Implementation of the MCTargetAsmParser interface:
239  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
240  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
241                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
242  bool ParseDirective(AsmToken DirectiveID);
243
244  unsigned checkTargetMatchPredicate(MCInst &Inst);
245
246  bool MatchAndEmitInstruction(SMLoc IDLoc,
247                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
248                               MCStreamer &Out);
249};
250} // end anonymous namespace
251
252namespace {
253
254/// ARMOperand - Instances of this class represent a parsed ARM machine
255/// instruction.
256class ARMOperand : public MCParsedAsmOperand {
257  enum KindTy {
258    k_CondCode,
259    k_CCOut,
260    k_ITCondMask,
261    k_CoprocNum,
262    k_CoprocReg,
263    k_CoprocOption,
264    k_Immediate,
265    k_FPImmediate,
266    k_MemBarrierOpt,
267    k_Memory,
268    k_PostIndexRegister,
269    k_MSRMask,
270    k_ProcIFlags,
271    k_VectorIndex,
272    k_Register,
273    k_RegisterList,
274    k_DPRRegisterList,
275    k_SPRRegisterList,
276    k_VectorList,
277    k_VectorListAllLanes,
278    k_VectorListIndexed,
279    k_ShiftedRegister,
280    k_ShiftedImmediate,
281    k_ShifterImmediate,
282    k_RotateImmediate,
283    k_BitfieldDescriptor,
284    k_Token
285  } Kind;
286
287  SMLoc StartLoc, EndLoc;
288  SmallVector<unsigned, 8> Registers;
289
290  union {
291    struct {
292      ARMCC::CondCodes Val;
293    } CC;
294
295    struct {
296      unsigned Val;
297    } Cop;
298
299    struct {
300      unsigned Val;
301    } CoprocOption;
302
303    struct {
304      unsigned Mask:4;
305    } ITMask;
306
307    struct {
308      ARM_MB::MemBOpt Val;
309    } MBOpt;
310
311    struct {
312      ARM_PROC::IFlags Val;
313    } IFlags;
314
315    struct {
316      unsigned Val;
317    } MMask;
318
319    struct {
320      const char *Data;
321      unsigned Length;
322    } Tok;
323
324    struct {
325      unsigned RegNum;
326    } Reg;
327
328    // A vector register list is a sequential list of 1 to 4 registers.
329    struct {
330      unsigned RegNum;
331      unsigned Count;
332      unsigned LaneIndex;
333    } VectorList;
334
335    struct {
336      unsigned Val;
337    } VectorIndex;
338
339    struct {
340      const MCExpr *Val;
341    } Imm;
342
343    struct {
344      unsigned Val;       // encoded 8-bit representation
345    } FPImm;
346
347    /// Combined record for all forms of ARM address expressions.
348    struct {
349      unsigned BaseRegNum;
350      // Offset is in OffsetReg or OffsetImm. If both are zero, no offset
351      // was specified.
352      const MCConstantExpr *OffsetImm;  // Offset immediate value
353      unsigned OffsetRegNum;    // Offset register num, when OffsetImm == NULL
354      ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg
355      unsigned ShiftImm;        // shift for OffsetReg.
356      unsigned Alignment;       // 0 = no alignment specified
357                                // n = alignment in bytes (8, 16, or 32)
358      unsigned isNegative : 1;  // Negated OffsetReg? (~'U' bit)
359    } Memory;
360
361    struct {
362      unsigned RegNum;
363      bool isAdd;
364      ARM_AM::ShiftOpc ShiftTy;
365      unsigned ShiftImm;
366    } PostIdxReg;
367
368    struct {
369      bool isASR;
370      unsigned Imm;
371    } ShifterImm;
372    struct {
373      ARM_AM::ShiftOpc ShiftTy;
374      unsigned SrcReg;
375      unsigned ShiftReg;
376      unsigned ShiftImm;
377    } RegShiftedReg;
378    struct {
379      ARM_AM::ShiftOpc ShiftTy;
380      unsigned SrcReg;
381      unsigned ShiftImm;
382    } RegShiftedImm;
383    struct {
384      unsigned Imm;
385    } RotImm;
386    struct {
387      unsigned LSB;
388      unsigned Width;
389    } Bitfield;
390  };
391
392  ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {}
393public:
394  ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() {
395    Kind = o.Kind;
396    StartLoc = o.StartLoc;
397    EndLoc = o.EndLoc;
398    switch (Kind) {
399    case k_CondCode:
400      CC = o.CC;
401      break;
402    case k_ITCondMask:
403      ITMask = o.ITMask;
404      break;
405    case k_Token:
406      Tok = o.Tok;
407      break;
408    case k_CCOut:
409    case k_Register:
410      Reg = o.Reg;
411      break;
412    case k_RegisterList:
413    case k_DPRRegisterList:
414    case k_SPRRegisterList:
415      Registers = o.Registers;
416      break;
417    case k_VectorList:
418    case k_VectorListAllLanes:
419    case k_VectorListIndexed:
420      VectorList = o.VectorList;
421      break;
422    case k_CoprocNum:
423    case k_CoprocReg:
424      Cop = o.Cop;
425      break;
426    case k_CoprocOption:
427      CoprocOption = o.CoprocOption;
428      break;
429    case k_Immediate:
430      Imm = o.Imm;
431      break;
432    case k_FPImmediate:
433      FPImm = o.FPImm;
434      break;
435    case k_MemBarrierOpt:
436      MBOpt = o.MBOpt;
437      break;
438    case k_Memory:
439      Memory = o.Memory;
440      break;
441    case k_PostIndexRegister:
442      PostIdxReg = o.PostIdxReg;
443      break;
444    case k_MSRMask:
445      MMask = o.MMask;
446      break;
447    case k_ProcIFlags:
448      IFlags = o.IFlags;
449      break;
450    case k_ShifterImmediate:
451      ShifterImm = o.ShifterImm;
452      break;
453    case k_ShiftedRegister:
454      RegShiftedReg = o.RegShiftedReg;
455      break;
456    case k_ShiftedImmediate:
457      RegShiftedImm = o.RegShiftedImm;
458      break;
459    case k_RotateImmediate:
460      RotImm = o.RotImm;
461      break;
462    case k_BitfieldDescriptor:
463      Bitfield = o.Bitfield;
464      break;
465    case k_VectorIndex:
466      VectorIndex = o.VectorIndex;
467      break;
468    }
469  }
470
471  /// getStartLoc - Get the location of the first token of this operand.
472  SMLoc getStartLoc() const { return StartLoc; }
473  /// getEndLoc - Get the location of the last token of this operand.
474  SMLoc getEndLoc() const { return EndLoc; }
475
476  ARMCC::CondCodes getCondCode() const {
477    assert(Kind == k_CondCode && "Invalid access!");
478    return CC.Val;
479  }
480
481  unsigned getCoproc() const {
482    assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!");
483    return Cop.Val;
484  }
485
486  StringRef getToken() const {
487    assert(Kind == k_Token && "Invalid access!");
488    return StringRef(Tok.Data, Tok.Length);
489  }
490
491  unsigned getReg() const {
492    assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!");
493    return Reg.RegNum;
494  }
495
496  const SmallVectorImpl<unsigned> &getRegList() const {
497    assert((Kind == k_RegisterList || Kind == k_DPRRegisterList ||
498            Kind == k_SPRRegisterList) && "Invalid access!");
499    return Registers;
500  }
501
502  const MCExpr *getImm() const {
503    assert(Kind == k_Immediate && "Invalid access!");
504    return Imm.Val;
505  }
506
507  unsigned getFPImm() const {
508    assert(Kind == k_FPImmediate && "Invalid access!");
509    return FPImm.Val;
510  }
511
512  unsigned getVectorIndex() const {
513    assert(Kind == k_VectorIndex && "Invalid access!");
514    return VectorIndex.Val;
515  }
516
517  ARM_MB::MemBOpt getMemBarrierOpt() const {
518    assert(Kind == k_MemBarrierOpt && "Invalid access!");
519    return MBOpt.Val;
520  }
521
522  ARM_PROC::IFlags getProcIFlags() const {
523    assert(Kind == k_ProcIFlags && "Invalid access!");
524    return IFlags.Val;
525  }
526
527  unsigned getMSRMask() const {
528    assert(Kind == k_MSRMask && "Invalid access!");
529    return MMask.Val;
530  }
531
532  bool isCoprocNum() const { return Kind == k_CoprocNum; }
533  bool isCoprocReg() const { return Kind == k_CoprocReg; }
534  bool isCoprocOption() const { return Kind == k_CoprocOption; }
535  bool isCondCode() const { return Kind == k_CondCode; }
536  bool isCCOut() const { return Kind == k_CCOut; }
537  bool isITMask() const { return Kind == k_ITCondMask; }
538  bool isITCondCode() const { return Kind == k_CondCode; }
539  bool isImm() const { return Kind == k_Immediate; }
540  bool isFPImm() const { return Kind == k_FPImmediate; }
541  bool isImm8s4() const {
542    if (Kind != k_Immediate)
543      return false;
544    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
545    if (!CE) return false;
546    int64_t Value = CE->getValue();
547    return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020;
548  }
549  bool isImm0_1020s4() const {
550    if (Kind != k_Immediate)
551      return false;
552    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
553    if (!CE) return false;
554    int64_t Value = CE->getValue();
555    return ((Value & 3) == 0) && Value >= 0 && Value <= 1020;
556  }
557  bool isImm0_508s4() const {
558    if (Kind != k_Immediate)
559      return false;
560    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
561    if (!CE) return false;
562    int64_t Value = CE->getValue();
563    return ((Value & 3) == 0) && Value >= 0 && Value <= 508;
564  }
565  bool isImm0_255() const {
566    if (Kind != k_Immediate)
567      return false;
568    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
569    if (!CE) return false;
570    int64_t Value = CE->getValue();
571    return Value >= 0 && Value < 256;
572  }
573  bool isImm0_7() const {
574    if (Kind != k_Immediate)
575      return false;
576    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
577    if (!CE) return false;
578    int64_t Value = CE->getValue();
579    return Value >= 0 && Value < 8;
580  }
581  bool isImm0_15() const {
582    if (Kind != k_Immediate)
583      return false;
584    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
585    if (!CE) return false;
586    int64_t Value = CE->getValue();
587    return Value >= 0 && Value < 16;
588  }
589  bool isImm0_31() const {
590    if (Kind != k_Immediate)
591      return false;
592    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
593    if (!CE) return false;
594    int64_t Value = CE->getValue();
595    return Value >= 0 && Value < 32;
596  }
597  bool isImm1_16() const {
598    if (Kind != k_Immediate)
599      return false;
600    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
601    if (!CE) return false;
602    int64_t Value = CE->getValue();
603    return Value > 0 && Value < 17;
604  }
605  bool isImm1_32() const {
606    if (Kind != k_Immediate)
607      return false;
608    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
609    if (!CE) return false;
610    int64_t Value = CE->getValue();
611    return Value > 0 && Value < 33;
612  }
613  bool isImm0_32() const {
614    if (Kind != k_Immediate)
615      return false;
616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
617    if (!CE) return false;
618    int64_t Value = CE->getValue();
619    return Value >= 0 && Value < 33;
620  }
621  bool isImm0_65535() const {
622    if (Kind != k_Immediate)
623      return false;
624    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
625    if (!CE) return false;
626    int64_t Value = CE->getValue();
627    return Value >= 0 && Value < 65536;
628  }
629  bool isImm0_65535Expr() const {
630    if (Kind != k_Immediate)
631      return false;
632    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
633    // If it's not a constant expression, it'll generate a fixup and be
634    // handled later.
635    if (!CE) return true;
636    int64_t Value = CE->getValue();
637    return Value >= 0 && Value < 65536;
638  }
639  bool isImm24bit() const {
640    if (Kind != k_Immediate)
641      return false;
642    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
643    if (!CE) return false;
644    int64_t Value = CE->getValue();
645    return Value >= 0 && Value <= 0xffffff;
646  }
647  bool isImmThumbSR() const {
648    if (Kind != k_Immediate)
649      return false;
650    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
651    if (!CE) return false;
652    int64_t Value = CE->getValue();
653    return Value > 0 && Value < 33;
654  }
655  bool isPKHLSLImm() const {
656    if (Kind != k_Immediate)
657      return false;
658    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
659    if (!CE) return false;
660    int64_t Value = CE->getValue();
661    return Value >= 0 && Value < 32;
662  }
663  bool isPKHASRImm() const {
664    if (Kind != k_Immediate)
665      return false;
666    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
667    if (!CE) return false;
668    int64_t Value = CE->getValue();
669    return Value > 0 && Value <= 32;
670  }
671  bool isARMSOImm() const {
672    if (Kind != k_Immediate)
673      return false;
674    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
675    if (!CE) return false;
676    int64_t Value = CE->getValue();
677    return ARM_AM::getSOImmVal(Value) != -1;
678  }
679  bool isARMSOImmNot() const {
680    if (Kind != k_Immediate)
681      return false;
682    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
683    if (!CE) return false;
684    int64_t Value = CE->getValue();
685    return ARM_AM::getSOImmVal(~Value) != -1;
686  }
687  bool isT2SOImm() const {
688    if (Kind != k_Immediate)
689      return false;
690    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
691    if (!CE) return false;
692    int64_t Value = CE->getValue();
693    return ARM_AM::getT2SOImmVal(Value) != -1;
694  }
695  bool isT2SOImmNot() const {
696    if (Kind != k_Immediate)
697      return false;
698    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
699    if (!CE) return false;
700    int64_t Value = CE->getValue();
701    return ARM_AM::getT2SOImmVal(~Value) != -1;
702  }
703  bool isSetEndImm() const {
704    if (Kind != k_Immediate)
705      return false;
706    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
707    if (!CE) return false;
708    int64_t Value = CE->getValue();
709    return Value == 1 || Value == 0;
710  }
711  bool isReg() const { return Kind == k_Register; }
712  bool isRegList() const { return Kind == k_RegisterList; }
713  bool isDPRRegList() const { return Kind == k_DPRRegisterList; }
714  bool isSPRRegList() const { return Kind == k_SPRRegisterList; }
715  bool isToken() const { return Kind == k_Token; }
716  bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; }
717  bool isMemory() const { return Kind == k_Memory; }
718  bool isShifterImm() const { return Kind == k_ShifterImmediate; }
719  bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; }
720  bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; }
721  bool isRotImm() const { return Kind == k_RotateImmediate; }
722  bool isBitfield() const { return Kind == k_BitfieldDescriptor; }
723  bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; }
724  bool isPostIdxReg() const {
725    return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift;
726  }
727  bool isMemNoOffset(bool alignOK = false) const {
728    if (!isMemory())
729      return false;
730    // No offset of any kind.
731    return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 &&
732     (alignOK || Memory.Alignment == 0);
733  }
734  bool isAlignedMemory() const {
735    return isMemNoOffset(true);
736  }
737  bool isAddrMode2() const {
738    if (!isMemory() || Memory.Alignment != 0) return false;
739    // Check for register offset.
740    if (Memory.OffsetRegNum) return true;
741    // Immediate offset in range [-4095, 4095].
742    if (!Memory.OffsetImm) return true;
743    int64_t Val = Memory.OffsetImm->getValue();
744    return Val > -4096 && Val < 4096;
745  }
746  bool isAM2OffsetImm() const {
747    if (Kind != k_Immediate)
748      return false;
749    // Immediate offset in range [-4095, 4095].
750    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
751    if (!CE) return false;
752    int64_t Val = CE->getValue();
753    return Val > -4096 && Val < 4096;
754  }
755  bool isAddrMode3() const {
756    if (!isMemory() || Memory.Alignment != 0) return false;
757    // No shifts are legal for AM3.
758    if (Memory.ShiftType != ARM_AM::no_shift) return false;
759    // Check for register offset.
760    if (Memory.OffsetRegNum) return true;
761    // Immediate offset in range [-255, 255].
762    if (!Memory.OffsetImm) return true;
763    int64_t Val = Memory.OffsetImm->getValue();
764    return Val > -256 && Val < 256;
765  }
766  bool isAM3Offset() const {
767    if (Kind != k_Immediate && Kind != k_PostIndexRegister)
768      return false;
769    if (Kind == k_PostIndexRegister)
770      return PostIdxReg.ShiftTy == ARM_AM::no_shift;
771    // Immediate offset in range [-255, 255].
772    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
773    if (!CE) return false;
774    int64_t Val = CE->getValue();
775    // Special case, #-0 is INT32_MIN.
776    return (Val > -256 && Val < 256) || Val == INT32_MIN;
777  }
778  bool isAddrMode5() const {
779    // If we have an immediate that's not a constant, treat it as a label
780    // reference needing a fixup. If it is a constant, it's something else
781    // and we reject it.
782    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
783      return true;
784    if (!isMemory() || Memory.Alignment != 0) return false;
785    // Check for register offset.
786    if (Memory.OffsetRegNum) return false;
787    // Immediate offset in range [-1020, 1020] and a multiple of 4.
788    if (!Memory.OffsetImm) return true;
789    int64_t Val = Memory.OffsetImm->getValue();
790    return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) ||
791      Val == INT32_MIN;
792  }
793  bool isMemTBB() const {
794    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
795        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
796      return false;
797    return true;
798  }
799  bool isMemTBH() const {
800    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
801        Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 ||
802        Memory.Alignment != 0)
803      return false;
804    return true;
805  }
806  bool isMemRegOffset() const {
807    if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0)
808      return false;
809    return true;
810  }
811  bool isT2MemRegOffset() const {
812    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
813        Memory.Alignment != 0)
814      return false;
815    // Only lsl #{0, 1, 2, 3} allowed.
816    if (Memory.ShiftType == ARM_AM::no_shift)
817      return true;
818    if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3)
819      return false;
820    return true;
821  }
822  bool isMemThumbRR() const {
823    // Thumb reg+reg addressing is simple. Just two registers, a base and
824    // an offset. No shifts, negations or any other complicating factors.
825    if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative ||
826        Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0)
827      return false;
828    return isARMLowRegister(Memory.BaseRegNum) &&
829      (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum));
830  }
831  bool isMemThumbRIs4() const {
832    if (!isMemory() || Memory.OffsetRegNum != 0 ||
833        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
834      return false;
835    // Immediate offset, multiple of 4 in range [0, 124].
836    if (!Memory.OffsetImm) return true;
837    int64_t Val = Memory.OffsetImm->getValue();
838    return Val >= 0 && Val <= 124 && (Val % 4) == 0;
839  }
840  bool isMemThumbRIs2() const {
841    if (!isMemory() || Memory.OffsetRegNum != 0 ||
842        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
843      return false;
844    // Immediate offset, multiple of 2 in range [0, 62].
845    if (!Memory.OffsetImm) return true;
846    int64_t Val = Memory.OffsetImm->getValue();
847    return Val >= 0 && Val <= 62 && (Val % 2) == 0;
848  }
849  bool isMemThumbRIs1() const {
850    if (!isMemory() || Memory.OffsetRegNum != 0 ||
851        !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0)
852      return false;
853    // Immediate offset in range [0, 31].
854    if (!Memory.OffsetImm) return true;
855    int64_t Val = Memory.OffsetImm->getValue();
856    return Val >= 0 && Val <= 31;
857  }
858  bool isMemThumbSPI() const {
859    if (!isMemory() || Memory.OffsetRegNum != 0 ||
860        Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0)
861      return false;
862    // Immediate offset, multiple of 4 in range [0, 1020].
863    if (!Memory.OffsetImm) return true;
864    int64_t Val = Memory.OffsetImm->getValue();
865    return Val >= 0 && Val <= 1020 && (Val % 4) == 0;
866  }
867  bool isMemImm8s4Offset() const {
868    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
869      return false;
870    // Immediate offset a multiple of 4 in range [-1020, 1020].
871    if (!Memory.OffsetImm) return true;
872    int64_t Val = Memory.OffsetImm->getValue();
873    return Val >= -1020 && Val <= 1020 && (Val & 3) == 0;
874  }
875  bool isMemImm0_1020s4Offset() const {
876    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
877      return false;
878    // Immediate offset a multiple of 4 in range [0, 1020].
879    if (!Memory.OffsetImm) return true;
880    int64_t Val = Memory.OffsetImm->getValue();
881    return Val >= 0 && Val <= 1020 && (Val & 3) == 0;
882  }
883  bool isMemImm8Offset() const {
884    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
885      return false;
886    // Immediate offset in range [-255, 255].
887    if (!Memory.OffsetImm) return true;
888    int64_t Val = Memory.OffsetImm->getValue();
889    return (Val == INT32_MIN) || (Val > -256 && Val < 256);
890  }
891  bool isMemPosImm8Offset() const {
892    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
893      return false;
894    // Immediate offset in range [0, 255].
895    if (!Memory.OffsetImm) return true;
896    int64_t Val = Memory.OffsetImm->getValue();
897    return Val >= 0 && Val < 256;
898  }
899  bool isMemNegImm8Offset() const {
900    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
901      return false;
902    // Immediate offset in range [-255, -1].
903    if (!Memory.OffsetImm) return true;
904    int64_t Val = Memory.OffsetImm->getValue();
905    return Val > -256 && Val < 0;
906  }
907  bool isMemUImm12Offset() const {
908    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
909      return false;
910    // Immediate offset in range [0, 4095].
911    if (!Memory.OffsetImm) return true;
912    int64_t Val = Memory.OffsetImm->getValue();
913    return (Val >= 0 && Val < 4096);
914  }
915  bool isMemImm12Offset() const {
916    // If we have an immediate that's not a constant, treat it as a label
917    // reference needing a fixup. If it is a constant, it's something else
918    // and we reject it.
919    if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm()))
920      return true;
921
922    if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0)
923      return false;
924    // Immediate offset in range [-4095, 4095].
925    if (!Memory.OffsetImm) return true;
926    int64_t Val = Memory.OffsetImm->getValue();
927    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
928  }
929  bool isPostIdxImm8() const {
930    if (Kind != k_Immediate)
931      return false;
932    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
933    if (!CE) return false;
934    int64_t Val = CE->getValue();
935    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
936  }
937  bool isPostIdxImm8s4() const {
938    if (Kind != k_Immediate)
939      return false;
940    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
941    if (!CE) return false;
942    int64_t Val = CE->getValue();
943    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
944      (Val == INT32_MIN);
945  }
946
947  bool isMSRMask() const { return Kind == k_MSRMask; }
948  bool isProcIFlags() const { return Kind == k_ProcIFlags; }
949
950  // NEON operands.
951  bool isVecListOneD() const {
952    if (Kind != k_VectorList) return false;
953    return VectorList.Count == 1;
954  }
955
956  bool isVecListTwoD() const {
957    if (Kind != k_VectorList) return false;
958    return VectorList.Count == 2;
959  }
960
961  bool isVecListThreeD() const {
962    if (Kind != k_VectorList) return false;
963    return VectorList.Count == 3;
964  }
965
966  bool isVecListFourD() const {
967    if (Kind != k_VectorList) return false;
968    return VectorList.Count == 4;
969  }
970
971  bool isVecListTwoQ() const {
972    if (Kind != k_VectorList) return false;
973    //FIXME: We haven't taught the parser to handle by-two register lists
974    // yet, so don't pretend to know one.
975    return VectorList.Count == 2 && false;
976  }
977
978  bool isVecListOneDAllLanes() const {
979    if (Kind != k_VectorListAllLanes) return false;
980    return VectorList.Count == 1;
981  }
982
983  bool isVecListTwoDAllLanes() const {
984    if (Kind != k_VectorListAllLanes) return false;
985    return VectorList.Count == 2;
986  }
987
988  bool isVecListOneDByteIndexed() const {
989    if (Kind != k_VectorListIndexed) return false;
990    return VectorList.Count == 1 && VectorList.LaneIndex <= 7;
991  }
992
993  bool isVectorIndex8() const {
994    if (Kind != k_VectorIndex) return false;
995    return VectorIndex.Val < 8;
996  }
997  bool isVectorIndex16() const {
998    if (Kind != k_VectorIndex) return false;
999    return VectorIndex.Val < 4;
1000  }
1001  bool isVectorIndex32() const {
1002    if (Kind != k_VectorIndex) return false;
1003    return VectorIndex.Val < 2;
1004  }
1005
1006  bool isNEONi8splat() const {
1007    if (Kind != k_Immediate)
1008      return false;
1009    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1010    // Must be a constant.
1011    if (!CE) return false;
1012    int64_t Value = CE->getValue();
1013    // i8 value splatted across 8 bytes. The immediate is just the 8-bit
1014    // value.
1015    return Value >= 0 && Value < 256;
1016  }
1017
1018  bool isNEONi16splat() const {
1019    if (Kind != k_Immediate)
1020      return false;
1021    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1022    // Must be a constant.
1023    if (!CE) return false;
1024    int64_t Value = CE->getValue();
1025    // i16 value in the range [0,255] or [0x0100, 0xff00]
1026    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
1027  }
1028
1029  bool isNEONi32splat() const {
1030    if (Kind != k_Immediate)
1031      return false;
1032    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1033    // Must be a constant.
1034    if (!CE) return false;
1035    int64_t Value = CE->getValue();
1036    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
1037    return (Value >= 0 && Value < 256) ||
1038      (Value >= 0x0100 && Value <= 0xff00) ||
1039      (Value >= 0x010000 && Value <= 0xff0000) ||
1040      (Value >= 0x01000000 && Value <= 0xff000000);
1041  }
1042
1043  bool isNEONi32vmov() const {
1044    if (Kind != k_Immediate)
1045      return false;
1046    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1047    // Must be a constant.
1048    if (!CE) return false;
1049    int64_t Value = CE->getValue();
1050    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X;
1051    // for VMOV/VMVN only, 00Xf or 0Xff are also accepted.
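    // For example, 0x12ff matches the 00Xf form and 0x34ffff matches the
    // 0Xff form.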
1052    return (Value >= 0 && Value < 256) ||
1053      (Value >= 0x0100 && Value <= 0xff00) ||
1054      (Value >= 0x010000 && Value <= 0xff0000) ||
1055      (Value >= 0x01000000 && Value <= 0xff000000) ||
1056      (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) ||
1057      (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff);
1058  }
1059
1060  bool isNEONi64splat() const {
1061    if (Kind != k_Immediate)
1062      return false;
1063    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1064    // Must be a constant.
1065    if (!CE) return false;
1066    uint64_t Value = CE->getValue();
1067    // i64 value with each byte being either 0 or 0xff.
1068    for (unsigned i = 0; i < 8; ++i, Value >>= 8)
1069      if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false;
1070    return true;
1071  }
1072
1073  void addExpr(MCInst &Inst, const MCExpr *Expr) const {
1074    // Add as immediates when possible.  Null MCExpr = 0.
1075    if (Expr == 0)
1076      Inst.addOperand(MCOperand::CreateImm(0));
1077    else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr))
1078      Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1079    else
1080      Inst.addOperand(MCOperand::CreateExpr(Expr));
1081  }
1082
1083  void addCondCodeOperands(MCInst &Inst, unsigned N) const {
1084    assert(N == 2 && "Invalid number of operands!");
1085    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1086    unsigned RegNum = getCondCode() == ARMCC::AL ? 0: ARM::CPSR;
1087    Inst.addOperand(MCOperand::CreateReg(RegNum));
1088  }
1089
1090  void addCoprocNumOperands(MCInst &Inst, unsigned N) const {
1091    assert(N == 1 && "Invalid number of operands!");
1092    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1093  }
1094
1095  void addCoprocRegOperands(MCInst &Inst, unsigned N) const {
1096    assert(N == 1 && "Invalid number of operands!");
1097    Inst.addOperand(MCOperand::CreateImm(getCoproc()));
1098  }
1099
1100  void addCoprocOptionOperands(MCInst &Inst, unsigned N) const {
1101    assert(N == 1 && "Invalid number of operands!");
1102    Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val));
1103  }
1104
1105  void addITMaskOperands(MCInst &Inst, unsigned N) const {
1106    assert(N == 1 && "Invalid number of operands!");
1107    Inst.addOperand(MCOperand::CreateImm(ITMask.Mask));
1108  }
1109
1110  void addITCondCodeOperands(MCInst &Inst, unsigned N) const {
1111    assert(N == 1 && "Invalid number of operands!");
1112    Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode())));
1113  }
1114
1115  void addCCOutOperands(MCInst &Inst, unsigned N) const {
1116    assert(N == 1 && "Invalid number of operands!");
1117    Inst.addOperand(MCOperand::CreateReg(getReg()));
1118  }
1119
1120  void addRegOperands(MCInst &Inst, unsigned N) const {
1121    assert(N == 1 && "Invalid number of operands!");
1122    Inst.addOperand(MCOperand::CreateReg(getReg()));
1123  }
1124
1125  void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const {
1126    assert(N == 3 && "Invalid number of operands!");
1127    assert(isRegShiftedReg() &&
1128           "addRegShiftedRegOperands() on non RegShiftedReg!");
1129    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg));
1130    Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg));
1131    Inst.addOperand(MCOperand::CreateImm(
1132      ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm)));
1133  }
1134
1135  void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const {
1136    assert(N == 2 && "Invalid number of operands!");
1137    assert(isRegShiftedImm() &&
1138           "addRegShiftedImmOperands() on non RegShiftedImm!");
1139    Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg));
1140    Inst.addOperand(MCOperand::CreateImm(
1141      ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm)));
1142  }
1143
1144  void addShifterImmOperands(MCInst &Inst, unsigned N) const {
1145    assert(N == 1 && "Invalid number of operands!");
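    // Packed as (isASR << 5) | Imm; for example, "asr #17" becomes
    // (1 << 5) | 17 == 49.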
1146    Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) |
1147                                         ShifterImm.Imm));
1148  }
1149
1150  void addRegListOperands(MCInst &Inst, unsigned N) const {
1151    assert(N == 1 && "Invalid number of operands!");
1152    const SmallVectorImpl<unsigned> &RegList = getRegList();
1153    for (SmallVectorImpl<unsigned>::const_iterator
1154           I = RegList.begin(), E = RegList.end(); I != E; ++I)
1155      Inst.addOperand(MCOperand::CreateReg(*I));
1156  }
1157
1158  void addDPRRegListOperands(MCInst &Inst, unsigned N) const {
1159    addRegListOperands(Inst, N);
1160  }
1161
1162  void addSPRRegListOperands(MCInst &Inst, unsigned N) const {
1163    addRegListOperands(Inst, N);
1164  }
1165
1166  void addRotImmOperands(MCInst &Inst, unsigned N) const {
1167    assert(N == 1 && "Invalid number of operands!");
1168    // Encoded as val>>3. The printer handles display as 8, 16, 24.
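    // For example, a rotation of 16 is stored here as 16 and added to the
    // MCInst as 16 >> 3 == 2.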
1169    Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3));
1170  }
1171
1172  void addBitfieldOperands(MCInst &Inst, unsigned N) const {
1173    assert(N == 1 && "Invalid number of operands!");
1174    // Munge the lsb/width into a bitfield mask.
1175    unsigned lsb = Bitfield.LSB;
1176    unsigned width = Bitfield.Width;
1177    // Make a 32-bit mask w/ the referenced bits clear and all other bits set.
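    // For example, lsb = 8 and width = 8 give
    //   ~((0xffffffff >> 8) << 24 >> 16) = ~0x0000ff00 = 0xffff00ff,
    // i.e. bits [15:8] clear and all other bits set.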
1178    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
1179                      (32 - (lsb + width)));
1180    Inst.addOperand(MCOperand::CreateImm(Mask));
1181  }
1182
1183  void addImmOperands(MCInst &Inst, unsigned N) const {
1184    assert(N == 1 && "Invalid number of operands!");
1185    addExpr(Inst, getImm());
1186  }
1187
1188  void addFPImmOperands(MCInst &Inst, unsigned N) const {
1189    assert(N == 1 && "Invalid number of operands!");
1190    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
1191  }
1192
1193  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
1194    assert(N == 1 && "Invalid number of operands!");
1195    // FIXME: We really want to scale the value here, but the LDRD/STRD
1196    // instructions don't encode operands that way yet.
1197    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1198    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
1199  }
1200
1201  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
1202    assert(N == 1 && "Invalid number of operands!");
1203    // The immediate is scaled by four in the encoding and is stored
1204    // in the MCInst as such. Lop off the low two bits here.
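    // For example, #1020 is added to the MCInst as 255.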
1205    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1206    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1207  }
1208
1209  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
1210    assert(N == 1 && "Invalid number of operands!");
1211    // The immediate is scaled by four in the encoding and is stored
1212    // in the MCInst as such. Lop off the low two bits here.
1213    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1214    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
1215  }
1216
1217  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
1218    assert(N == 1 && "Invalid number of operands!");
1219    // The constant encodes as the immediate-1, and we store in the instruction
1220    // the bits as encoded, so subtract off one here.
1221    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1222    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1223  }
1224
1225  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
1226    assert(N == 1 && "Invalid number of operands!");
1227    // The constant encodes as the immediate-1, and we store in the instruction
1228    // the bits as encoded, so subtract off one here.
1229    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1230    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
1231  }
1232
1233  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
1234    assert(N == 1 && "Invalid number of operands!");
1235    // The constant encodes as the immediate, except for 32, which encodes as
1236    // zero.
1237    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1238    unsigned Imm = CE->getValue();
1239    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
1240  }
1241
1242  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
1243    assert(N == 1 && "Invalid number of operands!");
1244    // An ASR value of 32 encodes as 0, so that's how we want to add it to
1245    // the instruction as well.
1246    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1247    int Val = CE->getValue();
1248    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
1249  }
1250
1251  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
1252    assert(N == 1 && "Invalid number of operands!");
1253    // The operand is actually a t2_so_imm, but we have its bitwise
1254    // negation in the assembly source, so twiddle it here.
1255    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1256    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1257  }
1258
1259  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
1260    assert(N == 1 && "Invalid number of operands!");
1261    // The operand is actually a so_imm, but we have its bitwise
1262    // negation in the assembly source, so twiddle it here.
1263    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1264    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
1265  }
1266
1267  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
1268    assert(N == 1 && "Invalid number of operands!");
1269    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
1270  }
1271
1272  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
1273    assert(N == 1 && "Invalid number of operands!");
1274    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1275  }
1276
1277  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
1278    assert(N == 2 && "Invalid number of operands!");
1279    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1280    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
1281  }
1282
1283  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
1284    assert(N == 3 && "Invalid number of operands!");
1285    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1286    if (!Memory.OffsetRegNum) {
1287      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1288      // Special case for #-0
1289      if (Val == INT32_MIN) Val = 0;
1290      if (Val < 0) Val = -Val;
1291      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1292    } else {
1293      // For register offset, we encode the shift type and negation flag
1294      // here.
1295      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1296                              Memory.ShiftImm, Memory.ShiftType);
1297    }
1298    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1299    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1300    Inst.addOperand(MCOperand::CreateImm(Val));
1301  }
1302
1303  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
1304    assert(N == 2 && "Invalid number of operands!");
1305    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1306    assert(CE && "non-constant AM2OffsetImm operand!");
1307    int32_t Val = CE->getValue();
1308    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1309    // Special case for #-0
1310    if (Val == INT32_MIN) Val = 0;
1311    if (Val < 0) Val = -Val;
1312    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
1313    Inst.addOperand(MCOperand::CreateReg(0));
1314    Inst.addOperand(MCOperand::CreateImm(Val));
1315  }
1316
1317  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
1318    assert(N == 3 && "Invalid number of operands!");
1319    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1320    if (!Memory.OffsetRegNum) {
1321      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1322      // Special case for #-0
1323      if (Val == INT32_MIN) Val = 0;
1324      if (Val < 0) Val = -Val;
1325      Val = ARM_AM::getAM3Opc(AddSub, Val);
1326    } else {
1327      // For register offset, we encode the shift type and negation flag
1328      // here.
1329      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
1330    }
1331    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1332    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1333    Inst.addOperand(MCOperand::CreateImm(Val));
1334  }
1335
1336  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
1337    assert(N == 2 && "Invalid number of operands!");
1338    if (Kind == k_PostIndexRegister) {
1339      int32_t Val =
1340        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
1341      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1342      Inst.addOperand(MCOperand::CreateImm(Val));
1343      return;
1344    }
1345
1346    // Constant offset.
1347    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
1348    int32_t Val = CE->getValue();
1349    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1350    // Special case for #-0
1351    if (Val == INT32_MIN) Val = 0;
1352    if (Val < 0) Val = -Val;
1353    Val = ARM_AM::getAM3Opc(AddSub, Val);
1354    Inst.addOperand(MCOperand::CreateReg(0));
1355    Inst.addOperand(MCOperand::CreateImm(Val));
1356  }
1357
1358  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
1359    assert(N == 2 && "Invalid number of operands!");
1360    // If we have an immediate that's not a constant, treat it as a label
1361    // reference needing a fixup. If it is a constant, it's something else
1362    // and we reject it.
1363    if (isImm()) {
1364      Inst.addOperand(MCOperand::CreateExpr(getImm()));
1365      Inst.addOperand(MCOperand::CreateImm(0));
1366      return;
1367    }
1368
1369    // The lower two bits are always zero and as such are not encoded.
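    // For example, an offset of #-1020 is scaled to -255 here and encoded
    // below as (sub, 255).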
1370    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1371    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
1372    // Special case for #-0
1373    if (Val == INT32_MIN) Val = 0;
1374    if (Val < 0) Val = -Val;
1375    Val = ARM_AM::getAM5Opc(AddSub, Val);
1376    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1377    Inst.addOperand(MCOperand::CreateImm(Val));
1378  }
1379
1380  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
1381    assert(N == 2 && "Invalid number of operands!");
1382    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1383    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1384    Inst.addOperand(MCOperand::CreateImm(Val));
1385  }
1386
1387  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
1388    assert(N == 2 && "Invalid number of operands!");
1389    // The lower two bits are always zero and as such are not encoded.
1390    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
1391    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1392    Inst.addOperand(MCOperand::CreateImm(Val));
1393  }
1394
1395  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1396    assert(N == 2 && "Invalid number of operands!");
1397    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1398    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1399    Inst.addOperand(MCOperand::CreateImm(Val));
1400  }
1401
1402  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1403    addMemImm8OffsetOperands(Inst, N);
1404  }
1405
1406  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
1407    addMemImm8OffsetOperands(Inst, N);
1408  }
1409
1410  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1411    assert(N == 2 && "Invalid number of operands!");
1412    // If this is an immediate, it's a label reference.
1413    if (Kind == k_Immediate) {
1414      addExpr(Inst, getImm());
1415      Inst.addOperand(MCOperand::CreateImm(0));
1416      return;
1417    }
1418
1419    // Otherwise, it's a normal memory reg+offset.
1420    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1421    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1422    Inst.addOperand(MCOperand::CreateImm(Val));
1423  }
1424
1425  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
1426    assert(N == 2 && "Invalid number of operands!");
1427    // If this is an immediate, it's a label reference.
1428    if (Kind == k_Immediate) {
1429      addExpr(Inst, getImm());
1430      Inst.addOperand(MCOperand::CreateImm(0));
1431      return;
1432    }
1433
1434    // Otherwise, it's a normal memory reg+offset.
1435    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
1436    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1437    Inst.addOperand(MCOperand::CreateImm(Val));
1438  }
1439
1440  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
1441    assert(N == 2 && "Invalid number of operands!");
1442    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1443    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1444  }
1445
1446  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
1447    assert(N == 2 && "Invalid number of operands!");
1448    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1449    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1450  }
1451
1452  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1453    assert(N == 3 && "Invalid number of operands!");
1454    unsigned Val =
1455      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
1456                        Memory.ShiftImm, Memory.ShiftType);
1457    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1458    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1459    Inst.addOperand(MCOperand::CreateImm(Val));
1460  }
1461
1462  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
1463    assert(N == 3 && "Invalid number of operands!");
1464    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1465    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1466    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
1467  }
1468
1469  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
1470    assert(N == 2 && "Invalid number of operands!");
1471    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1472    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
1473  }
1474
1475  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
1476    assert(N == 2 && "Invalid number of operands!");
1477    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1478    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1479    Inst.addOperand(MCOperand::CreateImm(Val));
1480  }
1481
1482  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
1483    assert(N == 2 && "Invalid number of operands!");
1484    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
1485    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1486    Inst.addOperand(MCOperand::CreateImm(Val));
1487  }
1488
1489  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
1490    assert(N == 2 && "Invalid number of operands!");
1491    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
1492    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1493    Inst.addOperand(MCOperand::CreateImm(Val));
1494  }
1495
1496  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
1497    assert(N == 2 && "Invalid number of operands!");
1498    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
1499    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
1500    Inst.addOperand(MCOperand::CreateImm(Val));
1501  }
1502
1503  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
1504    assert(N == 1 && "Invalid number of operands!");
1505    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1506    assert(CE && "non-constant post-idx-imm8 operand!");
1507    int Imm = CE->getValue();
1508    bool isAdd = Imm >= 0;
1509    if (Imm == INT32_MIN) Imm = 0;
1510    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
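    // For illustration: a post-indexed "ldrh r0, [r1], #-4" yields Imm -4
    // here, which is encoded as magnitude 4 with the add bit (bit 8) clear;
    // "#+4" would be 4 | (1 << 8).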
1511    Inst.addOperand(MCOperand::CreateImm(Imm));
1512  }
1513
1514  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
1515    assert(N == 1 && "Invalid number of operands!");
1516    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1517    assert(CE && "non-constant post-idx-imm8s4 operand!");
1518    int Imm = CE->getValue();
1519    bool isAdd = Imm >= 0;
1520    if (Imm == INT32_MIN) Imm = 0;
1521    // Immediate is scaled by 4.
1522    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
1523    Inst.addOperand(MCOperand::CreateImm(Imm));
1524  }
1525
1526  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
1527    assert(N == 2 && "Invalid number of operands!");
1528    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1529    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
1530  }
1531
1532  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
1533    assert(N == 2 && "Invalid number of operands!");
1534    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
1535    // The sign, shift type, and shift amount are encoded in a single operand
1536    // using the AM2 encoding helpers.
1537    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
1538    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
1539                                     PostIdxReg.ShiftTy);
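    // For illustration: a post-indexed "ldr r0, [r1], -r2, lsl #2" reaches
    // this point as PostIdxReg {r2, isAdd=false, lsl, 2}; the exact bit
    // packing is left to getAM2Opc.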
1540    Inst.addOperand(MCOperand::CreateImm(Imm));
1541  }
1542
1543  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
1544    assert(N == 1 && "Invalid number of operands!");
1545    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
1546  }
1547
1548  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
1549    assert(N == 1 && "Invalid number of operands!");
1550    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
1551  }
1552
1553  void addVecListOperands(MCInst &Inst, unsigned N) const {
1554    assert(N == 1 && "Invalid number of operands!");
1555    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1556  }
1557
1558  void addVecListIndexedOperands(MCInst &Inst, unsigned N) const {
1559    assert(N == 2 && "Invalid number of operands!");
1560    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
1561    Inst.addOperand(MCOperand::CreateImm(VectorList.LaneIndex));
1562  }
1563
1564  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
1565    assert(N == 1 && "Invalid number of operands!");
1566    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1567  }
1568
1569  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
1570    assert(N == 1 && "Invalid number of operands!");
1571    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1572  }
1573
1574  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
1575    assert(N == 1 && "Invalid number of operands!");
1576    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
1577  }
1578
1579  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
1580    assert(N == 1 && "Invalid number of operands!");
1581    // The immediate encodes the type of constant as well as the value.
1582    // Mask in that this is an i8 splat.
1583    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1584    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
1585  }
1586
1587  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
1588    assert(N == 1 && "Invalid number of operands!");
1589    // The immediate encodes the type of constant as well as the value.
1590    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1591    unsigned Value = CE->getValue();
1592    if (Value >= 256)
1593      Value = (Value >> 8) | 0xa00;
1594    else
1595      Value |= 0x800;
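    // For illustration of the two cases above: 0x56 becomes (0x56 | 0x800)
    // and 0x5600 becomes (0x56 | 0xa00); the masked-in bits distinguish the
    // low-byte and high-byte forms of the i16 splat (exact field layout is
    // per the NEON modified-immediate encoding).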
1596    Inst.addOperand(MCOperand::CreateImm(Value));
1597  }
1598
1599  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
1600    assert(N == 1 && "Invalid number of operands!");
1601    // The immediate encodes the type of constant as well as the value.
1602    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1603    unsigned Value = CE->getValue();
1604    if (Value >= 256 && Value <= 0xff00)
1605      Value = (Value >> 8) | 0x200;
1606    else if (Value > 0xffff && Value <= 0xff0000)
1607      Value = (Value >> 16) | 0x400;
1608    else if (Value > 0xffffff)
1609      Value = (Value >> 24) | 0x600;
1610    Inst.addOperand(MCOperand::CreateImm(Value));
1611  }
1612
1613  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
1614    assert(N == 1 && "Invalid number of operands!");
1615    // The immediate encodes the type of constant as well as the value.
1616    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1617    unsigned Value = CE->getValue();
1618    if (Value >= 256 && Value <= 0xffff)
1619      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
1620    else if (Value > 0xffff && Value <= 0xffffff)
1621      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
1622    else if (Value > 0xffffff)
1623      Value = (Value >> 24) | 0x600;
1624    Inst.addOperand(MCOperand::CreateImm(Value));
1625  }
1626
1627  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
1628    assert(N == 1 && "Invalid number of operands!");
1629    // The immediate encodes the type of constant as well as the value.
1630    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
1631    uint64_t Value = CE->getValue();
1632    unsigned Imm = 0;
1633    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
1634      Imm |= (Value & 1) << i;
1635    }
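    // For illustration: each byte of the i64 value must be 0x00 or 0xff, so
    // only one bit per byte is kept. 0x00ff00ff00ff00ff collapses to the
    // byte mask 0b01010101 (0x55) before 0x1e00 is or'ed in.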
1636    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
1637  }
1638
1639  virtual void print(raw_ostream &OS) const;
1640
1641  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
1642    ARMOperand *Op = new ARMOperand(k_ITCondMask);
1643    Op->ITMask.Mask = Mask;
1644    Op->StartLoc = S;
1645    Op->EndLoc = S;
1646    return Op;
1647  }
1648
1649  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
1650    ARMOperand *Op = new ARMOperand(k_CondCode);
1651    Op->CC.Val = CC;
1652    Op->StartLoc = S;
1653    Op->EndLoc = S;
1654    return Op;
1655  }
1656
1657  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
1658    ARMOperand *Op = new ARMOperand(k_CoprocNum);
1659    Op->Cop.Val = CopVal;
1660    Op->StartLoc = S;
1661    Op->EndLoc = S;
1662    return Op;
1663  }
1664
1665  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
1666    ARMOperand *Op = new ARMOperand(k_CoprocReg);
1667    Op->Cop.Val = CopVal;
1668    Op->StartLoc = S;
1669    Op->EndLoc = S;
1670    return Op;
1671  }
1672
1673  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
1674    ARMOperand *Op = new ARMOperand(k_CoprocOption);
1675    Op->Cop.Val = Val;
1676    Op->StartLoc = S;
1677    Op->EndLoc = E;
1678    return Op;
1679  }
1680
1681  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
1682    ARMOperand *Op = new ARMOperand(k_CCOut);
1683    Op->Reg.RegNum = RegNum;
1684    Op->StartLoc = S;
1685    Op->EndLoc = S;
1686    return Op;
1687  }
1688
1689  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
1690    ARMOperand *Op = new ARMOperand(k_Token);
1691    Op->Tok.Data = Str.data();
1692    Op->Tok.Length = Str.size();
1693    Op->StartLoc = S;
1694    Op->EndLoc = S;
1695    return Op;
1696  }
1697
1698  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
1699    ARMOperand *Op = new ARMOperand(k_Register);
1700    Op->Reg.RegNum = RegNum;
1701    Op->StartLoc = S;
1702    Op->EndLoc = E;
1703    return Op;
1704  }
1705
1706  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
1707                                           unsigned SrcReg,
1708                                           unsigned ShiftReg,
1709                                           unsigned ShiftImm,
1710                                           SMLoc S, SMLoc E) {
1711    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
1712    Op->RegShiftedReg.ShiftTy = ShTy;
1713    Op->RegShiftedReg.SrcReg = SrcReg;
1714    Op->RegShiftedReg.ShiftReg = ShiftReg;
1715    Op->RegShiftedReg.ShiftImm = ShiftImm;
1716    Op->StartLoc = S;
1717    Op->EndLoc = E;
1718    return Op;
1719  }
1720
1721  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
1722                                            unsigned SrcReg,
1723                                            unsigned ShiftImm,
1724                                            SMLoc S, SMLoc E) {
1725    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
1726    Op->RegShiftedImm.ShiftTy = ShTy;
1727    Op->RegShiftedImm.SrcReg = SrcReg;
1728    Op->RegShiftedImm.ShiftImm = ShiftImm;
1729    Op->StartLoc = S;
1730    Op->EndLoc = E;
1731    return Op;
1732  }
1733
1734  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
1735                                   SMLoc S, SMLoc E) {
1736    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
1737    Op->ShifterImm.isASR = isASR;
1738    Op->ShifterImm.Imm = Imm;
1739    Op->StartLoc = S;
1740    Op->EndLoc = E;
1741    return Op;
1742  }
1743
1744  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
1745    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
1746    Op->RotImm.Imm = Imm;
1747    Op->StartLoc = S;
1748    Op->EndLoc = E;
1749    return Op;
1750  }
1751
1752  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
1753                                    SMLoc S, SMLoc E) {
1754    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
1755    Op->Bitfield.LSB = LSB;
1756    Op->Bitfield.Width = Width;
1757    Op->StartLoc = S;
1758    Op->EndLoc = E;
1759    return Op;
1760  }
1761
1762  static ARMOperand *
1763  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
1764                SMLoc StartLoc, SMLoc EndLoc) {
1765    KindTy Kind = k_RegisterList;
1766
1767    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
1768      Kind = k_DPRRegisterList;
1769    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
1770             contains(Regs.front().first))
1771      Kind = k_SPRRegisterList;
1772
1773    ARMOperand *Op = new ARMOperand(Kind);
1774    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
1775           I = Regs.begin(), E = Regs.end(); I != E; ++I)
1776      Op->Registers.push_back(I->first);
1777    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
1778    Op->StartLoc = StartLoc;
1779    Op->EndLoc = EndLoc;
1780    return Op;
1781  }
1782
1783  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
1784                                      SMLoc S, SMLoc E) {
1785    ARMOperand *Op = new ARMOperand(k_VectorList);
1786    Op->VectorList.RegNum = RegNum;
1787    Op->VectorList.Count = Count;
1788    Op->StartLoc = S;
1789    Op->EndLoc = E;
1790    return Op;
1791  }
1792
1793  static ARMOperand *CreateVectorListAllLanes(unsigned RegNum, unsigned Count,
1794                                              SMLoc S, SMLoc E) {
1795    ARMOperand *Op = new ARMOperand(k_VectorListAllLanes);
1796    Op->VectorList.RegNum = RegNum;
1797    Op->VectorList.Count = Count;
1798    Op->StartLoc = S;
1799    Op->EndLoc = E;
1800    return Op;
1801  }
1802
1803  static ARMOperand *CreateVectorListIndexed(unsigned RegNum, unsigned Count,
1804                                             unsigned Index, SMLoc S, SMLoc E) {
1805    ARMOperand *Op = new ARMOperand(k_VectorListIndexed);
1806    Op->VectorList.RegNum = RegNum;
1807    Op->VectorList.Count = Count;
1808    Op->VectorList.LaneIndex = Index;
1809    Op->StartLoc = S;
1810    Op->EndLoc = E;
1811    return Op;
1812  }
1813
1814  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
1815                                       MCContext &Ctx) {
1816    ARMOperand *Op = new ARMOperand(k_VectorIndex);
1817    Op->VectorIndex.Val = Idx;
1818    Op->StartLoc = S;
1819    Op->EndLoc = E;
1820    return Op;
1821  }
1822
1823  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
1824    ARMOperand *Op = new ARMOperand(k_Immediate);
1825    Op->Imm.Val = Val;
1826    Op->StartLoc = S;
1827    Op->EndLoc = E;
1828    return Op;
1829  }
1830
1831  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
1832    ARMOperand *Op = new ARMOperand(k_FPImmediate);
1833    Op->FPImm.Val = Val;
1834    Op->StartLoc = S;
1835    Op->EndLoc = S;
1836    return Op;
1837  }
1838
1839  static ARMOperand *CreateMem(unsigned BaseRegNum,
1840                               const MCConstantExpr *OffsetImm,
1841                               unsigned OffsetRegNum,
1842                               ARM_AM::ShiftOpc ShiftType,
1843                               unsigned ShiftImm,
1844                               unsigned Alignment,
1845                               bool isNegative,
1846                               SMLoc S, SMLoc E) {
1847    ARMOperand *Op = new ARMOperand(k_Memory);
1848    Op->Memory.BaseRegNum = BaseRegNum;
1849    Op->Memory.OffsetImm = OffsetImm;
1850    Op->Memory.OffsetRegNum = OffsetRegNum;
1851    Op->Memory.ShiftType = ShiftType;
1852    Op->Memory.ShiftImm = ShiftImm;
1853    Op->Memory.Alignment = Alignment;
1854    Op->Memory.isNegative = isNegative;
1855    Op->StartLoc = S;
1856    Op->EndLoc = E;
1857    return Op;
1858  }
1859
1860  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
1861                                      ARM_AM::ShiftOpc ShiftTy,
1862                                      unsigned ShiftImm,
1863                                      SMLoc S, SMLoc E) {
1864    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
1865    Op->PostIdxReg.RegNum = RegNum;
1866    Op->PostIdxReg.isAdd = isAdd;
1867    Op->PostIdxReg.ShiftTy = ShiftTy;
1868    Op->PostIdxReg.ShiftImm = ShiftImm;
1869    Op->StartLoc = S;
1870    Op->EndLoc = E;
1871    return Op;
1872  }
1873
1874  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
1875    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
1876    Op->MBOpt.Val = Opt;
1877    Op->StartLoc = S;
1878    Op->EndLoc = S;
1879    return Op;
1880  }
1881
1882  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
1883    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
1884    Op->IFlags.Val = IFlags;
1885    Op->StartLoc = S;
1886    Op->EndLoc = S;
1887    return Op;
1888  }
1889
1890  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
1891    ARMOperand *Op = new ARMOperand(k_MSRMask);
1892    Op->MMask.Val = MMask;
1893    Op->StartLoc = S;
1894    Op->EndLoc = S;
1895    return Op;
1896  }
1897};
1898
1899} // end anonymous namespace.
1900
1901void ARMOperand::print(raw_ostream &OS) const {
1902  switch (Kind) {
1903  case k_FPImmediate:
1904    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
1905       << ") >";
1906    break;
1907  case k_CondCode:
1908    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
1909    break;
1910  case k_CCOut:
1911    OS << "<ccout " << getReg() << ">";
1912    break;
1913  case k_ITCondMask: {
1914    static const char *MaskStr[] = {
1915      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
1916      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
1917    };
1918    assert((ITMask.Mask & 0xf) == ITMask.Mask);
1919    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
1920    break;
1921  }
1922  case k_CoprocNum:
1923    OS << "<coprocessor number: " << getCoproc() << ">";
1924    break;
1925  case k_CoprocReg:
1926    OS << "<coprocessor register: " << getCoproc() << ">";
1927    break;
1928  case k_CoprocOption:
1929    OS << "<coprocessor option: " << CoprocOption.Val << ">";
1930    break;
1931  case k_MSRMask:
1932    OS << "<mask: " << getMSRMask() << ">";
1933    break;
1934  case k_Immediate:
1935    getImm()->print(OS);
1936    break;
1937  case k_MemBarrierOpt:
1938    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
1939    break;
1940  case k_Memory:
1941    OS << "<memory "
1942       << "base:" << Memory.BaseRegNum;
1943    OS << ">";
1944    break;
1945  case k_PostIndexRegister:
1946    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
1947       << PostIdxReg.RegNum;
1948    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
1949      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
1950         << PostIdxReg.ShiftImm;
1951    OS << ">";
1952    break;
1953  case k_ProcIFlags: {
1954    OS << "<ARM_PROC::";
1955    unsigned IFlags = getProcIFlags();
1956    for (int i=2; i >= 0; --i)
1957      if (IFlags & (1 << i))
1958        OS << ARM_PROC::IFlagsToString(1 << i);
1959    OS << ">";
1960    break;
1961  }
1962  case k_Register:
1963    OS << "<register " << getReg() << ">";
1964    break;
1965  case k_ShifterImmediate:
1966    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
1967       << " #" << ShifterImm.Imm << ">";
1968    break;
1969  case k_ShiftedRegister:
1970    OS << "<so_reg_reg "
1971       << RegShiftedReg.SrcReg << " "
1972       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
1973       << " " << RegShiftedReg.ShiftReg << ">";
1974    break;
1975  case k_ShiftedImmediate:
1976    OS << "<so_reg_imm "
1977       << RegShiftedImm.SrcReg << " "
1978       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
1979       << " #" << RegShiftedImm.ShiftImm << ">";
1980    break;
1981  case k_RotateImmediate:
1982    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
1983    break;
1984  case k_BitfieldDescriptor:
1985    OS << "<bitfield " << "lsb: " << Bitfield.LSB
1986       << ", width: " << Bitfield.Width << ">";
1987    break;
1988  case k_RegisterList:
1989  case k_DPRRegisterList:
1990  case k_SPRRegisterList: {
1991    OS << "<register_list ";
1992
1993    const SmallVectorImpl<unsigned> &RegList = getRegList();
1994    for (SmallVectorImpl<unsigned>::const_iterator
1995           I = RegList.begin(), E = RegList.end(); I != E; ) {
1996      OS << *I;
1997      if (++I < E) OS << ", ";
1998    }
1999
2000    OS << ">";
2001    break;
2002  }
2003  case k_VectorList:
2004    OS << "<vector_list " << VectorList.Count << " * "
2005       << VectorList.RegNum << ">";
2006    break;
2007  case k_VectorListAllLanes:
2008    OS << "<vector_list(all lanes) " << VectorList.Count << " * "
2009       << VectorList.RegNum << ">";
2010    break;
2011  case k_VectorListIndexed:
2012    OS << "<vector_list(lane " << VectorList.LaneIndex << ") "
2013       << VectorList.Count << " * " << VectorList.RegNum << ">";
2014    break;
2015  case k_Token:
2016    OS << "'" << getToken() << "'";
2017    break;
2018  case k_VectorIndex:
2019    OS << "<vectorindex " << getVectorIndex() << ">";
2020    break;
2021  }
2022}
2023
2024/// @name Auto-generated Match Functions
2025/// {
2026
2027static unsigned MatchRegisterName(StringRef Name);
2028
2029/// }
2030
2031bool ARMAsmParser::ParseRegister(unsigned &RegNo,
2032                                 SMLoc &StartLoc, SMLoc &EndLoc) {
2033  RegNo = tryParseRegister();
2034
2035  return (RegNo == (unsigned)-1);
2036}
2037
2038/// Try to parse a register name.  The token must be an Identifier when called,
2039/// and if it is a register name the token is eaten and the register number is
2040/// returned.  Otherwise return -1.
2041///
2042int ARMAsmParser::tryParseRegister() {
2043  const AsmToken &Tok = Parser.getTok();
2044  if (Tok.isNot(AsmToken::Identifier)) return -1;
2045
2046  // FIXME: Validate register for the current architecture; we have to do
2047  // validation later, so maybe there is no need for this here.
2048  std::string lowerCase = Tok.getString().lower();
2049  unsigned RegNum = MatchRegisterName(lowerCase);
2050  if (!RegNum) {
2051    RegNum = StringSwitch<unsigned>(lowerCase)
2052      .Case("r13", ARM::SP)
2053      .Case("r14", ARM::LR)
2054      .Case("r15", ARM::PC)
2055      .Case("ip", ARM::R12)
2056      .Default(0);
2057  }
2058  if (!RegNum) return -1;
2059
2060  Parser.Lex(); // Eat identifier token.
2061
2062  return RegNum;
2063}
2064
2065// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0.
2066// If a recoverable error occurs, return 1. If an irrecoverable error
2067// occurs, return -1. An irrecoverable error is one where tokens have been
2068// consumed in the process of trying to parse the shifter (i.e., when it is
2069// indeed a shifter operand, but malformed).
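// For illustration, the shifter forms handled here look like the trailing
// operand of "add r0, r1, r2, lsl #3" (immediate shift), "mov r0, r1, ror r2"
// (register shift) or "mov r0, r1, rrx" (no amount); the register being
// shifted has already been parsed and is folded in below.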
2070int ARMAsmParser::tryParseShiftRegister(
2071                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2072  SMLoc S = Parser.getTok().getLoc();
2073  const AsmToken &Tok = Parser.getTok();
2074  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2075
2076  std::string lowerCase = Tok.getString().lower();
2077  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
2078      .Case("lsl", ARM_AM::lsl)
2079      .Case("lsr", ARM_AM::lsr)
2080      .Case("asr", ARM_AM::asr)
2081      .Case("ror", ARM_AM::ror)
2082      .Case("rrx", ARM_AM::rrx)
2083      .Default(ARM_AM::no_shift);
2084
2085  if (ShiftTy == ARM_AM::no_shift)
2086    return 1;
2087
2088  Parser.Lex(); // Eat the operator.
2089
2090  // The source register for the shift has already been added to the
2091  // operand list, so we need to pop it off and combine it into the shifted
2092  // register operand instead.
2093  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
2094  if (!PrevOp->isReg())
2095    return Error(PrevOp->getStartLoc(), "shift must be of a register");
2096  int SrcReg = PrevOp->getReg();
2097  int64_t Imm = 0;
2098  int ShiftReg = 0;
2099  if (ShiftTy == ARM_AM::rrx) {
2100    // RRX doesn't have an explicit shift amount. The encoder expects
2101    // the shift register to be the same as the source register. Seems odd,
2102    // but OK.
2103    ShiftReg = SrcReg;
2104  } else {
2105    // Figure out if this is shifted by a constant or a register (for non-RRX).
2106    if (Parser.getTok().is(AsmToken::Hash)) {
2107      Parser.Lex(); // Eat hash.
2108      SMLoc ImmLoc = Parser.getTok().getLoc();
2109      const MCExpr *ShiftExpr = 0;
2110      if (getParser().ParseExpression(ShiftExpr)) {
2111        Error(ImmLoc, "invalid immediate shift value");
2112        return -1;
2113      }
2114      // The expression must be evaluatable as an immediate.
2115      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
2116      if (!CE) {
2117        Error(ImmLoc, "invalid immediate shift value");
2118        return -1;
2119      }
2120      // Range check the immediate.
2121      // lsl, ror: 0 <= imm <= 31
2122      // lsr, asr: 0 <= imm <= 32
2123      Imm = CE->getValue();
2124      if (Imm < 0 ||
2125          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
2126          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
2127        Error(ImmLoc, "immediate shift value out of range");
2128        return -1;
2129      }
2130    } else if (Parser.getTok().is(AsmToken::Identifier)) {
2131      ShiftReg = tryParseRegister();
2132      SMLoc L = Parser.getTok().getLoc();
2133      if (ShiftReg == -1) {
2134        Error(L, "expected immediate or register in shift operand");
2135        return -1;
2136      }
2137    } else {
2138      Error(Parser.getTok().getLoc(),
2139            "expected immediate or register in shift operand");
2140      return -1;
2141    }
2142  }
2143
2144  if (ShiftReg && ShiftTy != ARM_AM::rrx)
2145    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
2146                                                         ShiftReg, Imm,
2147                                               S, Parser.getTok().getLoc()));
2148  else
2149    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
2150                                               S, Parser.getTok().getLoc()));
2151
2152  return 0;
2153}
2154
2155
2156/// Try to parse a register name.  The token must be an Identifier when called.
2157/// If it's a register, an AsmOperand is created. Another AsmOperand is created
2158/// if there is a "writeback". Returns 'true' if it's not a register.
2159///
2160/// TODO this is likely to change to allow different register types and or to
2161/// parse for a specific register type.
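/// For illustration: "r3" produces a plain register operand, the "sp!" in
/// "ldmia sp!, {r4, pc}" additionally produces a "!" token operand, and the
/// "d0[1]" in "vmov.32 d0[1], r2" additionally produces a vector-index
/// operand.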
2162bool ARMAsmParser::
2163tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2164  SMLoc S = Parser.getTok().getLoc();
2165  int RegNo = tryParseRegister();
2166  if (RegNo == -1)
2167    return true;
2168
2169  Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc()));
2170
2171  const AsmToken &ExclaimTok = Parser.getTok();
2172  if (ExclaimTok.is(AsmToken::Exclaim)) {
2173    Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(),
2174                                               ExclaimTok.getLoc()));
2175    Parser.Lex(); // Eat exclaim token
2176    return false;
2177  }
2178
2179  // Also check for an index operand. This is only legal for vector registers,
2180  // but that'll get caught OK in operand matching, so we don't need to
2181  // explicitly filter everything else out here.
2182  if (Parser.getTok().is(AsmToken::LBrac)) {
2183    SMLoc SIdx = Parser.getTok().getLoc();
2184    Parser.Lex(); // Eat left bracket token.
2185
2186    const MCExpr *ImmVal;
2187    if (getParser().ParseExpression(ImmVal))
2188      return true;
2189    const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal);
2190    if (!MCE) {
2191      TokError("immediate value expected for vector index");
2192      return true;
2193    }
2194
2195    SMLoc E = Parser.getTok().getLoc();
2196    if (Parser.getTok().isNot(AsmToken::RBrac)) {
2197      Error(E, "']' expected");
2198      return true;
2199    }
2200
2201    Parser.Lex(); // Eat right bracket token.
2202
2203    Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(),
2204                                                     SIdx, E,
2205                                                     getContext()));
2206  }
2207
2208  return false;
2209}
2210
2211/// MatchCoprocessorOperandName - Try to parse a coprocessor-related
2212/// instruction's symbolic operand name. Example: "p1", "p7", "c3",
2213/// "c5", ...
2214static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) {
2215  // Use the same layout as the tablegen'erated register name matcher. Ugly,
2216  // but efficient.
2217  switch (Name.size()) {
2218  default: break;
2219  case 2:
2220    if (Name[0] != CoprocOp)
2221      return -1;
2222    switch (Name[1]) {
2223    default:  return -1;
2224    case '0': return 0;
2225    case '1': return 1;
2226    case '2': return 2;
2227    case '3': return 3;
2228    case '4': return 4;
2229    case '5': return 5;
2230    case '6': return 6;
2231    case '7': return 7;
2232    case '8': return 8;
2233    case '9': return 9;
2234    }
2235    break;
2236  case 3:
2237    if (Name[0] != CoprocOp || Name[1] != '1')
2238      return -1;
2239    switch (Name[2]) {
2240    default:  return -1;
2241    case '0': return 10;
2242    case '1': return 11;
2243    case '2': return 12;
2244    case '3': return 13;
2245    case '4': return 14;
2246    case '5': return 15;
2247    }
2248    break;
2249  }
2250
2251  return -1;
2252}
2253
2254/// parseITCondCode - Try to parse a condition code for an IT instruction.
2255ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2256parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2257  SMLoc S = Parser.getTok().getLoc();
2258  const AsmToken &Tok = Parser.getTok();
2259  if (!Tok.is(AsmToken::Identifier))
2260    return MatchOperand_NoMatch;
2261  unsigned CC = StringSwitch<unsigned>(Tok.getString())
2262    .Case("eq", ARMCC::EQ)
2263    .Case("ne", ARMCC::NE)
2264    .Case("hs", ARMCC::HS)
2265    .Case("cs", ARMCC::HS)
2266    .Case("lo", ARMCC::LO)
2267    .Case("cc", ARMCC::LO)
2268    .Case("mi", ARMCC::MI)
2269    .Case("pl", ARMCC::PL)
2270    .Case("vs", ARMCC::VS)
2271    .Case("vc", ARMCC::VC)
2272    .Case("hi", ARMCC::HI)
2273    .Case("ls", ARMCC::LS)
2274    .Case("ge", ARMCC::GE)
2275    .Case("lt", ARMCC::LT)
2276    .Case("gt", ARMCC::GT)
2277    .Case("le", ARMCC::LE)
2278    .Case("al", ARMCC::AL)
2279    .Default(~0U);
2280  if (CC == ~0U)
2281    return MatchOperand_NoMatch;
2282  Parser.Lex(); // Eat the token.
2283
2284  Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S));
2285
2286  return MatchOperand_Success;
2287}
2288
2289/// parseCoprocNumOperand - Try to parse a coprocessor number operand. The
2290/// token must be an Identifier when called, and if it is a coprocessor
2291/// number, the token is eaten and the operand is added to the operand list.
2292ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2293parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2294  SMLoc S = Parser.getTok().getLoc();
2295  const AsmToken &Tok = Parser.getTok();
2296  if (Tok.isNot(AsmToken::Identifier))
2297    return MatchOperand_NoMatch;
2298
2299  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
2300  if (Num == -1)
2301    return MatchOperand_NoMatch;
2302
2303  Parser.Lex(); // Eat identifier token.
2304  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
2305  return MatchOperand_Success;
2306}
2307
2308/// parseCoprocRegOperand - Try to parse a coprocessor register operand. The
2309/// token must be an Identifier when called, and if it is a coprocessor
2310/// register, the token is eaten and the operand is added to the operand list.
2311ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2312parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2313  SMLoc S = Parser.getTok().getLoc();
2314  const AsmToken &Tok = Parser.getTok();
2315  if (Tok.isNot(AsmToken::Identifier))
2316    return MatchOperand_NoMatch;
2317
2318  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
2319  if (Reg == -1)
2320    return MatchOperand_NoMatch;
2321
2322  Parser.Lex(); // Eat identifier token.
2323  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
2324  return MatchOperand_Success;
2325}
2326
2327/// parseCoprocOptionOperand - Try to parse a coprocessor option operand.
2328/// coproc_option : '{' imm0_255 '}'
2329ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2330parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2331  SMLoc S = Parser.getTok().getLoc();
2332
2333  // If this isn't a '{', this isn't a coprocessor immediate operand.
2334  if (Parser.getTok().isNot(AsmToken::LCurly))
2335    return MatchOperand_NoMatch;
2336  Parser.Lex(); // Eat the '{'
2337
2338  const MCExpr *Expr;
2339  SMLoc Loc = Parser.getTok().getLoc();
2340  if (getParser().ParseExpression(Expr)) {
2341    Error(Loc, "illegal expression");
2342    return MatchOperand_ParseFail;
2343  }
2344  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
2345  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
2346    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
2347    return MatchOperand_ParseFail;
2348  }
2349  int Val = CE->getValue();
2350
2351  // Check for and consume the closing '}'
2352  if (Parser.getTok().isNot(AsmToken::RCurly))
2353    return MatchOperand_ParseFail;
2354  SMLoc E = Parser.getTok().getLoc();
2355  Parser.Lex(); // Eat the '}'
2356
2357  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
2358  return MatchOperand_Success;
2359}
2360
2361// For register list parsing, we need to map from raw GPR register numbering
2362// to the enumeration values. The enumeration values aren't sorted by
2363// register number due to our using "sp", "lr" and "pc" as canonical names.
2364static unsigned getNextRegister(unsigned Reg) {
2365  // If this is a GPR, we need to do it manually, otherwise we can rely
2366  // on the sort ordering of the enumeration since the other reg-classes
2367  // are sane.
2368  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2369    return Reg + 1;
2370  switch(Reg) {
2371  default: llvm_unreachable("Invalid GPR number!");
2372  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
2373  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
2374  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
2375  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
2376  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
2377  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
2378  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
2379  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
2380  }
2381}
2382
2383// Return the low-subreg of a given Q register.
2384static unsigned getDRegFromQReg(unsigned QReg) {
2385  switch (QReg) {
2386  default: llvm_unreachable("expected a Q register!");
2387  case ARM::Q0:  return ARM::D0;
2388  case ARM::Q1:  return ARM::D2;
2389  case ARM::Q2:  return ARM::D4;
2390  case ARM::Q3:  return ARM::D6;
2391  case ARM::Q4:  return ARM::D8;
2392  case ARM::Q5:  return ARM::D10;
2393  case ARM::Q6:  return ARM::D12;
2394  case ARM::Q7:  return ARM::D14;
2395  case ARM::Q8:  return ARM::D16;
2396  case ARM::Q9:  return ARM::D18;
2397  case ARM::Q10: return ARM::D20;
2398  case ARM::Q11: return ARM::D22;
2399  case ARM::Q12: return ARM::D24;
2400  case ARM::Q13: return ARM::D26;
2401  case ARM::Q14: return ARM::D28;
2402  case ARM::Q15: return ARM::D30;
2403  }
2404}
2405
2406/// Parse a register list.
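/// For illustration, the syntax accepted here covers lists such as
/// "{r0, r4-r7, lr}", "{d8-d11}" and "{s0, s1}"; as an extension, Q registers
/// may appear and are expanded to their two D sub-registers, so "{q0, q1}"
/// is treated as "{d0-d3}".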
2407bool ARMAsmParser::
2408parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2409  assert(Parser.getTok().is(AsmToken::LCurly) &&
2410         "Token is not a Left Curly Brace");
2411  SMLoc S = Parser.getTok().getLoc();
2412  Parser.Lex(); // Eat '{' token.
2413  SMLoc RegLoc = Parser.getTok().getLoc();
2414
2415  // Check the first register in the list to see what register class
2416  // this is a list of.
2417  int Reg = tryParseRegister();
2418  if (Reg == -1)
2419    return Error(RegLoc, "register expected");
2420
2421  // The reglist instructions have at most 16 registers, so reserve
2422  // space for that many.
2423  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
2424
2425  // Allow Q regs and just interpret them as the two D sub-registers.
2426  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2427    Reg = getDRegFromQReg(Reg);
2428    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2429    ++Reg;
2430  }
2431  const MCRegisterClass *RC;
2432  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
2433    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
2434  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
2435    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
2436  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
2437    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
2438  else
2439    return Error(RegLoc, "invalid register in register list");
2440
2441  // Store the register.
2442  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2443
2444  // This starts immediately after the first register token in the list,
2445  // so we can see either a comma or a minus (range separator) as a legal
2446  // next token.
2447  while (Parser.getTok().is(AsmToken::Comma) ||
2448         Parser.getTok().is(AsmToken::Minus)) {
2449    if (Parser.getTok().is(AsmToken::Minus)) {
2450      Parser.Lex(); // Eat the minus.
2451      SMLoc EndLoc = Parser.getTok().getLoc();
2452      int EndReg = tryParseRegister();
2453      if (EndReg == -1)
2454        return Error(EndLoc, "register expected");
2455      // Allow Q regs and just interpret them as the two D sub-registers.
2456      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2457        EndReg = getDRegFromQReg(EndReg) + 1;
2458      // If the register is the same as the start reg, there's nothing
2459      // more to do.
2460      if (Reg == EndReg)
2461        continue;
2462      // The register must be in the same register class as the first.
2463      if (!RC->contains(EndReg))
2464        return Error(EndLoc, "invalid register in register list");
2465      // Ranges must go from low to high.
2466      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
2467        return Error(EndLoc, "bad range in register list");
2468
2469      // Add all the registers in the range to the register list.
2470      while (Reg != EndReg) {
2471        Reg = getNextRegister(Reg);
2472        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2473      }
2474      continue;
2475    }
2476    Parser.Lex(); // Eat the comma.
2477    RegLoc = Parser.getTok().getLoc();
2478    int OldReg = Reg;
2479    Reg = tryParseRegister();
2480    if (Reg == -1)
2481      return Error(RegLoc, "register expected");
2482    // Allow Q regs and just interpret them as the two D sub-registers.
2483    bool isQReg = false;
2484    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2485      Reg = getDRegFromQReg(Reg);
2486      isQReg = true;
2487    }
2488    // The register must be in the same register class as the first.
2489    if (!RC->contains(Reg))
2490      return Error(RegLoc, "invalid register in register list");
2491    // List must be monotonically increasing.
2492    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
2493      return Error(RegLoc, "register list not in ascending order");
2494    // VFP register lists must also be contiguous.
2495    // It's OK to use the enumeration values directly here, as the
2496    // VFP register classes have the enum sorted properly.
2497    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
2498        Reg != OldReg + 1)
2499      return Error(RegLoc, "non-contiguous register range");
2500    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
2501    if (isQReg)
2502      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
2503  }
2504
2505  SMLoc E = Parser.getTok().getLoc();
2506  if (Parser.getTok().isNot(AsmToken::RCurly))
2507    return Error(E, "'}' expected");
2508  Parser.Lex(); // Eat '}' token.
2509
2510  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
2511  return false;
2512}
2513
2514// Helper function to parse the lane index for vector lists.
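// For illustration: given "d2" this reports NoLanes, "d2[]" reports AllLanes,
// and "d2[1]" reports IndexedLane with Index == 1.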
2515ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2516parseVectorLane(VectorLaneTy &LaneKind, unsigned &Index) {
2517  Index = 0; // Always return a defined index value.
2518  if (Parser.getTok().is(AsmToken::LBrac)) {
2519    Parser.Lex(); // Eat the '['.
2520    if (Parser.getTok().is(AsmToken::RBrac)) {
2521      // "Dn[]" is the 'all lanes' syntax.
2522      LaneKind = AllLanes;
2523      Parser.Lex(); // Eat the ']'.
2524      return MatchOperand_Success;
2525    }
2526    if (Parser.getTok().is(AsmToken::Integer)) {
2527      int64_t Val = Parser.getTok().getIntVal();
2528      // FIXME: Make this range check context sensitive for .8, .16, .32.
2529      if (Val < 0 || Val > 7)
2530        Error(Parser.getTok().getLoc(), "lane index out of range");
2531      Index = Val;
2532      LaneKind = IndexedLane;
2533      Parser.Lex(); // Eat the token.
2534      if (Parser.getTok().isNot(AsmToken::RBrac))
2535        Error(Parser.getTok().getLoc(), "']' expected");
2536      Parser.Lex(); // Eat the ']'.
2537      return MatchOperand_Success;
2538    }
2539    Error(Parser.getTok().getLoc(), "lane index must be empty or an integer");
2540    return MatchOperand_ParseFail;
2541  }
2542  LaneKind = NoLanes;
2543  return MatchOperand_Success;
2544}
2545
2546// parse a vector register list
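// For illustration, this accepts NEON operand lists such as "{d0, d1, d2}",
// "{d0-d3}", "{d0[], d1[]}" (all lanes) and "{d0[1], d1[1]}" (one lane), as
// well as the gas-style bare "d0" or "q1" mentioned below.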
2547ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2548parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2549  VectorLaneTy LaneKind;
2550  unsigned LaneIndex;
2551  SMLoc S = Parser.getTok().getLoc();
2552  // As an extension (to match gas), support a plain D register or Q register
2553  // (without enclosing curly braces) as a single or double entry list,
2554  // respectively.
2555  if (Parser.getTok().is(AsmToken::Identifier)) {
2556    int Reg = tryParseRegister();
2557    if (Reg == -1)
2558      return MatchOperand_NoMatch;
2559    SMLoc E = Parser.getTok().getLoc();
2560    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
2561      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2562      if (Res != MatchOperand_Success)
2563        return Res;
2564      switch (LaneKind) {
2565      default:
2566        assert(0 && "unexpected lane kind!");
2567      case NoLanes:
2568        E = Parser.getTok().getLoc();
2569        Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E));
2570        break;
2571      case AllLanes:
2572        E = Parser.getTok().getLoc();
2573        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 1, S, E));
2574        break;
2575      case IndexedLane:
2576        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 1,
2577                                                               LaneIndex, S,E));
2578        break;
2579      }
2580      return MatchOperand_Success;
2581    }
2582    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2583      Reg = getDRegFromQReg(Reg);
2584      OperandMatchResultTy Res = parseVectorLane(LaneKind, LaneIndex);
2585      if (Res != MatchOperand_Success)
2586        return Res;
2587      switch (LaneKind) {
2588      default:
2589        assert(0 && "unexpected lane kind!");
2590      case NoLanes:
2591        E = Parser.getTok().getLoc();
2592        Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, E));
2593        break;
2594      case AllLanes:
2595        E = Parser.getTok().getLoc();
2596        Operands.push_back(ARMOperand::CreateVectorListAllLanes(Reg, 2, S, E));
2597        break;
2598      case IndexedLane:
2599        Operands.push_back(ARMOperand::CreateVectorListIndexed(Reg, 2,
2600                                                               LaneIndex, S,E));
2601        break;
2602      }
2603      return MatchOperand_Success;
2604    }
2605    Error(S, "vector register expected");
2606    return MatchOperand_ParseFail;
2607  }
2608
2609  if (Parser.getTok().isNot(AsmToken::LCurly))
2610    return MatchOperand_NoMatch;
2611
2612  Parser.Lex(); // Eat '{' token.
2613  SMLoc RegLoc = Parser.getTok().getLoc();
2614
2615  int Reg = tryParseRegister();
2616  if (Reg == -1) {
2617    Error(RegLoc, "register expected");
2618    return MatchOperand_ParseFail;
2619  }
2620  unsigned Count = 1;
2621  unsigned FirstReg = Reg;
2622  // The list is of D registers, but we also allow Q regs and just interpret
2623  // them as the two D sub-registers.
2624  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2625    FirstReg = Reg = getDRegFromQReg(Reg);
2626    ++Reg;
2627    ++Count;
2628  }
2629  if (parseVectorLane(LaneKind, LaneIndex) != MatchOperand_Success)
2630    return MatchOperand_ParseFail;
2631
2632  while (Parser.getTok().is(AsmToken::Comma) ||
2633         Parser.getTok().is(AsmToken::Minus)) {
2634    if (Parser.getTok().is(AsmToken::Minus)) {
2635      Parser.Lex(); // Eat the minus.
2636      SMLoc EndLoc = Parser.getTok().getLoc();
2637      int EndReg = tryParseRegister();
2638      if (EndReg == -1) {
2639        Error(EndLoc, "register expected");
2640        return MatchOperand_ParseFail;
2641      }
2642      // Allow Q regs and just interpret them as the two D sub-registers.
2643      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
2644        EndReg = getDRegFromQReg(EndReg) + 1;
2645      // If the register is the same as the start reg, there's nothing
2646      // more to do.
2647      if (Reg == EndReg)
2648        continue;
2649      // The register must be in the same register class as the first.
2650      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
2651        Error(EndLoc, "invalid register in register list");
2652        return MatchOperand_ParseFail;
2653      }
2654      // Ranges must go from low to high.
2655      if (Reg > EndReg) {
2656        Error(EndLoc, "bad range in register list");
2657        return MatchOperand_ParseFail;
2658      }
2659      // Parse the lane specifier if present.
2660      VectorLaneTy NextLaneKind;
2661      unsigned NextLaneIndex;
2662      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2663        return MatchOperand_ParseFail;
2664      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2665        Error(EndLoc, "mismatched lane index in register list");
2666        return MatchOperand_ParseFail;
2667      }
2668      EndLoc = Parser.getTok().getLoc();
2669
2670      // Add all the registers in the range to the register list.
2671      Count += EndReg - Reg;
2672      Reg = EndReg;
2673      continue;
2674    }
2675    Parser.Lex(); // Eat the comma.
2676    RegLoc = Parser.getTok().getLoc();
2677    int OldReg = Reg;
2678    Reg = tryParseRegister();
2679    if (Reg == -1) {
2680      Error(RegLoc, "register expected");
2681      return MatchOperand_ParseFail;
2682    }
2683    // Vector register lists must be contiguous.
2684    // It's OK to use the enumeration values directly here, as the
2685    // VFP register classes have the enum sorted properly.
2686    //
2687    // The list is of D registers, but we also allow Q regs and just interpret
2688    // them as the two D sub-registers.
2689    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
2690      Reg = getDRegFromQReg(Reg);
2691      if (Reg != OldReg + 1) {
2692        Error(RegLoc, "non-contiguous register range");
2693        return MatchOperand_ParseFail;
2694      }
2695      ++Reg;
2696      Count += 2;
2697      // Parse the lane specifier if present.
2698      VectorLaneTy NextLaneKind;
2699      unsigned NextLaneIndex;
2700      SMLoc EndLoc = Parser.getTok().getLoc();
2701      if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2702        return MatchOperand_ParseFail;
2703      if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2704        Error(EndLoc, "mismatched lane index in register list");
2705        return MatchOperand_ParseFail;
2706      }
2707      continue;
2708    }
2709    // Normal D register. Just check that it's contiguous and keep going.
2710    if (Reg != OldReg + 1) {
2711      Error(RegLoc, "non-contiguous register range");
2712      return MatchOperand_ParseFail;
2713    }
2714    ++Count;
2715    // Parse the lane specifier if present.
2716    VectorLaneTy NextLaneKind;
2717    unsigned NextLaneIndex;
2718    SMLoc EndLoc = Parser.getTok().getLoc();
2719    if (parseVectorLane(NextLaneKind, NextLaneIndex) != MatchOperand_Success)
2720      return MatchOperand_ParseFail;
2721    if (NextLaneKind != LaneKind || LaneIndex != NextLaneIndex) {
2722      Error(EndLoc, "mismatched lane index in register list");
2723      return MatchOperand_ParseFail;
2724    }
2725  }
2726
2727  SMLoc E = Parser.getTok().getLoc();
2728  if (Parser.getTok().isNot(AsmToken::RCurly)) {
2729    Error(E, "'}' expected");
2730    return MatchOperand_ParseFail;
2731  }
2732  Parser.Lex(); // Eat '}' token.
2733
2734  switch (LaneKind) {
2735  default:
2736    assert(0 && "unexpected lane kind in register list.");
2737  case NoLanes:
2738    Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
2739    break;
2740  case AllLanes:
2741    Operands.push_back(ARMOperand::CreateVectorListAllLanes(FirstReg, Count,
2742                                                            S, E));
2743    break;
2744  case IndexedLane:
2745    Operands.push_back(ARMOperand::CreateVectorListIndexed(FirstReg, Count,
2746                                                           LaneIndex, S, E));
2747    break;
2748  }
2749  return MatchOperand_Success;
2750}
2751
2752/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
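/// For illustration: "dmb ish", "dsb sy" and "dmb oshst" all end up here;
/// the legacy spellings "sh"/"shst" and "un"/"unst" are accepted as aliases
/// for the ish/nsh variants.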
2753ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2754parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2755  SMLoc S = Parser.getTok().getLoc();
2756  const AsmToken &Tok = Parser.getTok();
2757  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2758  StringRef OptStr = Tok.getString();
2759
2760  unsigned Opt = StringSwitch<unsigned>(OptStr)
2761    .Case("sy",    ARM_MB::SY)
2762    .Case("st",    ARM_MB::ST)
2763    .Case("sh",    ARM_MB::ISH)
2764    .Case("ish",   ARM_MB::ISH)
2765    .Case("shst",  ARM_MB::ISHST)
2766    .Case("ishst", ARM_MB::ISHST)
2767    .Case("nsh",   ARM_MB::NSH)
2768    .Case("un",    ARM_MB::NSH)
2769    .Case("nshst", ARM_MB::NSHST)
2770    .Case("unst",  ARM_MB::NSHST)
2771    .Case("osh",   ARM_MB::OSH)
2772    .Case("oshst", ARM_MB::OSHST)
2773    .Default(~0U);
2774
2775  if (Opt == ~0U)
2776    return MatchOperand_NoMatch;
2777
2778  Parser.Lex(); // Eat identifier token.
2779  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
2780  return MatchOperand_Success;
2781}
2782
2783/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
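/// For illustration: the "aif" in "cpsid aif" or the "i" in "cpsie i" is
/// parsed here one letter at a time; the special string "none" yields an
/// empty flag set.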
2784ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2785parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2786  SMLoc S = Parser.getTok().getLoc();
2787  const AsmToken &Tok = Parser.getTok();
2788  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2789  StringRef IFlagsStr = Tok.getString();
2790
2791  // An iflags string of "none" is interpreted to mean that none of the AIF
2792  // bits are set.  Not a terribly useful instruction, but a valid encoding.
2793  unsigned IFlags = 0;
2794  if (IFlagsStr != "none") {
2795    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
2796      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
2797        .Case("a", ARM_PROC::A)
2798        .Case("i", ARM_PROC::I)
2799        .Case("f", ARM_PROC::F)
2800        .Default(~0U);
2801
2802      // If some specific iflag is already set, it means that some letter is
2803      // present more than once, which is not acceptable.
2804      if (Flag == ~0U || (IFlags & Flag))
2805        return MatchOperand_NoMatch;
2806
2807      IFlags |= Flag;
2808    }
2809  }
2810
2811  Parser.Lex(); // Eat identifier token.
2812  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
2813  return MatchOperand_Success;
2814}
2815
2816/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
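/// For illustration: "msr apsr_nzcvq, r0" yields mask 0x8, "msr cpsr_fc, r1"
/// yields 0x9 (c|f), and "msr spsr_fsxc, r2" yields 0x1f (all four flag bits
/// plus the SPSR bit). On M-class cores, system register names such as
/// "primask" or "basepri" are matched instead.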
2817ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2818parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2819  SMLoc S = Parser.getTok().getLoc();
2820  const AsmToken &Tok = Parser.getTok();
2821  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
2822  StringRef Mask = Tok.getString();
2823
2824  if (isMClass()) {
2825    // See ARMv6-M 10.1.1
2826    unsigned FlagsVal = StringSwitch<unsigned>(Mask)
2827      .Case("apsr", 0)
2828      .Case("iapsr", 1)
2829      .Case("eapsr", 2)
2830      .Case("xpsr", 3)
2831      .Case("ipsr", 5)
2832      .Case("epsr", 6)
2833      .Case("iepsr", 7)
2834      .Case("msp", 8)
2835      .Case("psp", 9)
2836      .Case("primask", 16)
2837      .Case("basepri", 17)
2838      .Case("basepri_max", 18)
2839      .Case("faultmask", 19)
2840      .Case("control", 20)
2841      .Default(~0U);
2842
2843    if (FlagsVal == ~0U)
2844      return MatchOperand_NoMatch;
2845
2846    if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19)
2847      // basepri, basepri_max and faultmask are only valid for v7-M.
2848      return MatchOperand_NoMatch;
2849
2850    Parser.Lex(); // Eat identifier token.
2851    Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
2852    return MatchOperand_Success;
2853  }
2854
2855  // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf"
2856  size_t Start = 0, Next = Mask.find('_');
2857  StringRef Flags = "";
2858  std::string SpecReg = Mask.slice(Start, Next).lower();
2859  if (Next != StringRef::npos)
2860    Flags = Mask.slice(Next+1, Mask.size());
2861
2862  // FlagsVal contains the complete mask:
2863  // 3-0: Mask
2864  // 4: Special Reg (cpsr, apsr => 0; spsr => 1)
2865  unsigned FlagsVal = 0;
2866
2867  if (SpecReg == "apsr") {
2868    FlagsVal = StringSwitch<unsigned>(Flags)
2869    .Case("nzcvq",  0x8) // same as CPSR_f
2870    .Case("g",      0x4) // same as CPSR_s
2871    .Case("nzcvqg", 0xc) // same as CPSR_fs
2872    .Default(~0U);
2873
2874    if (FlagsVal == ~0U) {
2875      if (!Flags.empty())
2876        return MatchOperand_NoMatch;
2877      else
2878        FlagsVal = 8; // No flag
2879    }
2880  } else if (SpecReg == "cpsr" || SpecReg == "spsr") {
2881    if (Flags == "all") // cpsr_all is an alias for cpsr_fc
2882      Flags = "fc";
2883    for (int i = 0, e = Flags.size(); i != e; ++i) {
2884      unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1))
2885      .Case("c", 1)
2886      .Case("x", 2)
2887      .Case("s", 4)
2888      .Case("f", 8)
2889      .Default(~0U);
2890
2891      // If some specific flag is already set, it means that some letter is
2892      // present more than once, which is not acceptable.
2893      if (Flag == ~0U || (FlagsVal & Flag))
2894        return MatchOperand_NoMatch;
2895      FlagsVal |= Flag;
2896    }
2897  } else // No match for special register.
2898    return MatchOperand_NoMatch;
2899
2900  // Special register without flags is NOT equivalent to "fc" flags.
2901  // NOTE: This is a divergence from gas' behavior.  Uncommenting the following
2902  // two lines would enable gas compatibility at the expense of breaking
2903  // round-tripping.
2904  //
2905  // if (!FlagsVal)
2906  //  FlagsVal = 0x9;
2907
2908  // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1)
2909  if (SpecReg == "spsr")
2910    FlagsVal |= 16;
2911
2912  Parser.Lex(); // Eat identifier token.
2913  Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S));
2914  return MatchOperand_Success;
2915}
2916
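/// parsePKHImm - Parse the shift-immediate operand of the PKH instructions,
/// e.g. the 'lsl #8' in 'pkhbt r0, r1, r2, lsl #8' or the 'asr #16' in
/// 'pkhtb r0, r1, r2, asr #16' (illustrative examples). The accepted operator
/// name and value range come from the caller via Op, Low and High.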
2917ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2918parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op,
2919            int Low, int High) {
2920  const AsmToken &Tok = Parser.getTok();
2921  if (Tok.isNot(AsmToken::Identifier)) {
2922    Error(Parser.getTok().getLoc(), Op + " operand expected.");
2923    return MatchOperand_ParseFail;
2924  }
2925  StringRef ShiftName = Tok.getString();
2926  std::string LowerOp = Op.lower();
2927  std::string UpperOp = Op.upper();
2928  if (ShiftName != LowerOp && ShiftName != UpperOp) {
2929    Error(Parser.getTok().getLoc(), Op + " operand expected.");
2930    return MatchOperand_ParseFail;
2931  }
2932  Parser.Lex(); // Eat shift type token.
2933
2934  // There must be a '#' and a shift amount.
2935  if (Parser.getTok().isNot(AsmToken::Hash)) {
2936    Error(Parser.getTok().getLoc(), "'#' expected");
2937    return MatchOperand_ParseFail;
2938  }
2939  Parser.Lex(); // Eat hash token.
2940
2941  const MCExpr *ShiftAmount;
2942  SMLoc Loc = Parser.getTok().getLoc();
2943  if (getParser().ParseExpression(ShiftAmount)) {
2944    Error(Loc, "illegal expression");
2945    return MatchOperand_ParseFail;
2946  }
2947  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
2948  if (!CE) {
2949    Error(Loc, "constant expression expected");
2950    return MatchOperand_ParseFail;
2951  }
2952  int Val = CE->getValue();
2953  if (Val < Low || Val > High) {
2954    Error(Loc, "immediate value out of range");
2955    return MatchOperand_ParseFail;
2956  }
2957
2958  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));
2959
2960  return MatchOperand_Success;
2961}
2962
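/// parseSetEndImm - Parse the endianness operand of SETEND, i.e. the 'be' in
/// 'setend be' or the 'le' in 'setend le' (illustrative examples).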
2963ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2964parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2965  const AsmToken &Tok = Parser.getTok();
2966  SMLoc S = Tok.getLoc();
2967  if (Tok.isNot(AsmToken::Identifier)) {
2968    Error(Tok.getLoc(), "'be' or 'le' operand expected");
2969    return MatchOperand_ParseFail;
2970  }
2971  int Val = StringSwitch<int>(Tok.getString())
2972    .Case("be", 1)
2973    .Case("le", 0)
2974    .Default(-1);
2975  Parser.Lex(); // Eat the token.
2976
2977  if (Val == -1) {
2978    Error(Tok.getLoc(), "'be' or 'le' operand expected");
2979    return MatchOperand_ParseFail;
2980  }
2981  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
2982                                                                  getContext()),
2983                                           S, Parser.getTok().getLoc()));
2984  return MatchOperand_Success;
2985}
2986
2987/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
2988/// instructions. Legal values are:
2989///     lsl #n  'n' in [0,31]
2990///     asr #n  'n' in [1,32]
2991///             n == 32 encoded as n == 0.
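/// For example (illustrative only): the 'lsl #4' in 'ssat r0, #8, r1, lsl #4'
/// or the 'asr #32' in 'usat r0, #7, r1, asr #32' (ARM mode; encoded as #0).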
2992ARMAsmParser::OperandMatchResultTy ARMAsmParser::
2993parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
2994  const AsmToken &Tok = Parser.getTok();
2995  SMLoc S = Tok.getLoc();
2996  if (Tok.isNot(AsmToken::Identifier)) {
2997    Error(S, "shift operator 'asr' or 'lsl' expected");
2998    return MatchOperand_ParseFail;
2999  }
3000  StringRef ShiftName = Tok.getString();
3001  bool isASR;
3002  if (ShiftName == "lsl" || ShiftName == "LSL")
3003    isASR = false;
3004  else if (ShiftName == "asr" || ShiftName == "ASR")
3005    isASR = true;
3006  else {
3007    Error(S, "shift operator 'asr' or 'lsl' expected");
3008    return MatchOperand_ParseFail;
3009  }
3010  Parser.Lex(); // Eat the operator.
3011
3012  // A '#' and a shift amount.
3013  if (Parser.getTok().isNot(AsmToken::Hash)) {
3014    Error(Parser.getTok().getLoc(), "'#' expected");
3015    return MatchOperand_ParseFail;
3016  }
3017  Parser.Lex(); // Eat hash token.
3018
3019  const MCExpr *ShiftAmount;
3020  SMLoc E = Parser.getTok().getLoc();
3021  if (getParser().ParseExpression(ShiftAmount)) {
3022    Error(E, "malformed shift expression");
3023    return MatchOperand_ParseFail;
3024  }
3025  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3026  if (!CE) {
3027    Error(E, "shift amount must be an immediate");
3028    return MatchOperand_ParseFail;
3029  }
3030
3031  int64_t Val = CE->getValue();
3032  if (isASR) {
3033    // Shift amount must be in [1,32]
3034    if (Val < 1 || Val > 32) {
3035      Error(E, "'asr' shift amount must be in range [1,32]");
3036      return MatchOperand_ParseFail;
3037    }
3038    // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode.
3039    if (isThumb() && Val == 32) {
3040      Error(E, "'asr #32' shift amount not allowed in Thumb mode");
3041      return MatchOperand_ParseFail;
3042    }
3043    if (Val == 32) Val = 0;
3044  } else {
3045    // Shift amount must be in [0,31]
3046    if (Val < 0 || Val > 31) {
3047      Error(E, "'lsl' shift amount must be in range [0,31]");
3048      return MatchOperand_ParseFail;
3049    }
3050  }
3051
3052  E = Parser.getTok().getLoc();
3053  Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E));
3054
3055  return MatchOperand_Success;
3056}
3057
3058/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family
3059/// of instructions. Legal values are:
3060///     ror #n  'n' in {0, 8, 16, 24}
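/// For example (illustrative only): the 'ror #8' in 'sxtb r0, r1, ror #8' or
/// the 'ror #24' in 'uxtb r2, r3, ror #24'.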
3061ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3062parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3063  const AsmToken &Tok = Parser.getTok();
3064  SMLoc S = Tok.getLoc();
3065  if (Tok.isNot(AsmToken::Identifier))
3066    return MatchOperand_NoMatch;
3067  StringRef ShiftName = Tok.getString();
3068  if (ShiftName != "ror" && ShiftName != "ROR")
3069    return MatchOperand_NoMatch;
3070  Parser.Lex(); // Eat the operator.
3071
3072  // A '#' and a rotate amount.
3073  if (Parser.getTok().isNot(AsmToken::Hash)) {
3074    Error(Parser.getTok().getLoc(), "'#' expected");
3075    return MatchOperand_ParseFail;
3076  }
3077  Parser.Lex(); // Eat hash token.
3078
3079  const MCExpr *ShiftAmount;
3080  SMLoc E = Parser.getTok().getLoc();
3081  if (getParser().ParseExpression(ShiftAmount)) {
3082    Error(E, "malformed rotate expression");
3083    return MatchOperand_ParseFail;
3084  }
3085  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
3086  if (!CE) {
3087    Error(E, "rotate amount must be an immediate");
3088    return MatchOperand_ParseFail;
3089  }
3090
3091  int64_t Val = CE->getValue();
3092  // The rotate amount must be in {0, 8, 16, 24} (0 is an undocumented
3093  // extension); normally, zero is represented in asm by omitting the rotate
3094  // operand entirely.
3095  if (Val != 8 && Val != 16 && Val != 24 && Val != 0) {
3096    Error(E, "'ror' rotate amount must be 8, 16, or 24");
3097    return MatchOperand_ParseFail;
3098  }
3099
3100  E = Parser.getTok().getLoc();
3101  Operands.push_back(ARMOperand::CreateRotImm(Val, S, E));
3102
3103  return MatchOperand_Success;
3104}
3105
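/// parseBitfield - Parse the '#lsb, #width' operand pair of the bitfield
/// instructions, e.g. the '#4, #8' in 'bfi r0, r1, #4, #8' or the '#0, #16'
/// in 'sbfx r2, r3, #0, #16' (illustrative examples).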
3106ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3107parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3108  SMLoc S = Parser.getTok().getLoc();
3109  // The bitfield descriptor is really two operands, the LSB and the width.
3110  if (Parser.getTok().isNot(AsmToken::Hash)) {
3111    Error(Parser.getTok().getLoc(), "'#' expected");
3112    return MatchOperand_ParseFail;
3113  }
3114  Parser.Lex(); // Eat hash token.
3115
3116  const MCExpr *LSBExpr;
3117  SMLoc E = Parser.getTok().getLoc();
3118  if (getParser().ParseExpression(LSBExpr)) {
3119    Error(E, "malformed immediate expression");
3120    return MatchOperand_ParseFail;
3121  }
3122  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr);
3123  if (!CE) {
3124    Error(E, "'lsb' operand must be an immediate");
3125    return MatchOperand_ParseFail;
3126  }
3127
3128  int64_t LSB = CE->getValue();
3129  // The LSB must be in the range [0,31]
3130  if (LSB < 0 || LSB > 31) {
3131    Error(E, "'lsb' operand must be in the range [0,31]");
3132    return MatchOperand_ParseFail;
3133  }
3134  E = Parser.getTok().getLoc();
3135
3136  // Expect another immediate operand.
3137  if (Parser.getTok().isNot(AsmToken::Comma)) {
3138    Error(Parser.getTok().getLoc(), "too few operands");
3139    return MatchOperand_ParseFail;
3140  }
3141  Parser.Lex(); // Eat comma token.
3142  if (Parser.getTok().isNot(AsmToken::Hash)) {
3143    Error(Parser.getTok().getLoc(), "'#' expected");
3144    return MatchOperand_ParseFail;
3145  }
3146  Parser.Lex(); // Eat hash token.
3147
3148  const MCExpr *WidthExpr;
3149  if (getParser().ParseExpression(WidthExpr)) {
3150    Error(E, "malformed immediate expression");
3151    return MatchOperand_ParseFail;
3152  }
3153  CE = dyn_cast<MCConstantExpr>(WidthExpr);
3154  if (!CE) {
3155    Error(E, "'width' operand must be an immediate");
3156    return MatchOperand_ParseFail;
3157  }
3158
3159  int64_t Width = CE->getValue();
3160  // The width must be in the range [1,32-lsb]
3161  if (Width < 1 || Width > 32 - LSB) {
3162    Error(E, "'width' operand must be in the range [1,32-lsb]");
3163    return MatchOperand_ParseFail;
3164  }
3165  E = Parser.getTok().getLoc();
3166
3167  Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E));
3168
3169  return MatchOperand_Success;
3170}
3171
3172ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3173parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3174  // Check for a post-index addressing register operand. Specifically:
3175  // postidx_reg := '+' register {, shift}
3176  //              | '-' register {, shift}
3177  //              | register {, shift}
3178
3179  // This method must return MatchOperand_NoMatch without consuming any tokens
3180  // in the case where there is no match, as other alternatives take other
3181  // parse methods.
3182  AsmToken Tok = Parser.getTok();
3183  SMLoc S = Tok.getLoc();
3184  bool haveEaten = false;
3185  bool isAdd = true;
3186  int Reg = -1;
3187  if (Tok.is(AsmToken::Plus)) {
3188    Parser.Lex(); // Eat the '+' token.
3189    haveEaten = true;
3190  } else if (Tok.is(AsmToken::Minus)) {
3191    Parser.Lex(); // Eat the '-' token.
3192    isAdd = false;
3193    haveEaten = true;
3194  }
3195  if (Parser.getTok().is(AsmToken::Identifier))
3196    Reg = tryParseRegister();
3197  if (Reg == -1) {
3198    if (!haveEaten)
3199      return MatchOperand_NoMatch;
3200    Error(Parser.getTok().getLoc(), "register expected");
3201    return MatchOperand_ParseFail;
3202  }
3203  SMLoc E = Parser.getTok().getLoc();
3204
3205  ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift;
3206  unsigned ShiftImm = 0;
3207  if (Parser.getTok().is(AsmToken::Comma)) {
3208    Parser.Lex(); // Eat the ','.
3209    if (parseMemRegOffsetShift(ShiftTy, ShiftImm))
3210      return MatchOperand_ParseFail;
3211  }
3212
3213  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy,
3214                                                  ShiftImm, S, E));
3215
3216  return MatchOperand_Success;
3217}
3218
3219ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3220parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3221  // Check for a post-index addressing register operand. Specifically:
3222  // am3offset := '+' register
3223  //              | '-' register
3224  //              | register
3225  //              | # imm
3226  //              | # + imm
3227  //              | # - imm
3228
3229  // This method must return MatchOperand_NoMatch without consuming any tokens
3230  // in the case where there is no match, as other alternatives take other
3231  // parse methods.
3232  AsmToken Tok = Parser.getTok();
3233  SMLoc S = Tok.getLoc();
3234
3235  // Do immediates first, as we always parse those if we have a '#'.
3236  if (Parser.getTok().is(AsmToken::Hash)) {
3237    Parser.Lex(); // Eat the '#'.
3238    // Explicitly look for a '-', as we need to encode negative zero
3239    // differently.
3240    bool isNegative = Parser.getTok().is(AsmToken::Minus);
3241    const MCExpr *Offset;
3242    if (getParser().ParseExpression(Offset))
3243      return MatchOperand_ParseFail;
3244    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3245    if (!CE) {
3246      Error(S, "constant expression expected");
3247      return MatchOperand_ParseFail;
3248    }
3249    SMLoc E = Tok.getLoc();
3250    // Negative zero is encoded as the flag value INT32_MIN.
3251    int32_t Val = CE->getValue();
3252    if (isNegative && Val == 0)
3253      Val = INT32_MIN;
3254
3255    Operands.push_back(
3256      ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E));
3257
3258    return MatchOperand_Success;
3259  }
3260
3261
3262  bool haveEaten = false;
3263  bool isAdd = true;
3264  int Reg = -1;
3265  if (Tok.is(AsmToken::Plus)) {
3266    Parser.Lex(); // Eat the '+' token.
3267    haveEaten = true;
3268  } else if (Tok.is(AsmToken::Minus)) {
3269    Parser.Lex(); // Eat the '-' token.
3270    isAdd = false;
3271    haveEaten = true;
3272  }
3273  if (Parser.getTok().is(AsmToken::Identifier))
3274    Reg = tryParseRegister();
3275  if (Reg == -1) {
3276    if (!haveEaten)
3277      return MatchOperand_NoMatch;
3278    Error(Parser.getTok().getLoc(), "register expected");
3279    return MatchOperand_ParseFail;
3280  }
3281  SMLoc E = Parser.getTok().getLoc();
3282
3283  Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift,
3284                                                  0, S, E));
3285
3286  return MatchOperand_Success;
3287}
3288
3289/// cvtT2LdrdPre - Convert parsed operands to MCInst.
3290/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3291/// when they refer to multiple MIOperands inside a single one.
3292bool ARMAsmParser::
3293cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
3294             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3295  // Rt, Rt2
3296  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3297  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3298  // Create a writeback register dummy placeholder.
3299  Inst.addOperand(MCOperand::CreateReg(0));
3300  // addr
3301  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3302  // pred
3303  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3304  return true;
3305}
3306
3307/// cvtT2StrdPre - Convert parsed operands to MCInst.
3308/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3309/// when they refer to multiple MIOperands inside a single one.
3310bool ARMAsmParser::
3311cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
3312             const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3313  // Create a writeback register dummy placeholder.
3314  Inst.addOperand(MCOperand::CreateReg(0));
3315  // Rt, Rt2
3316  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3317  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3318  // addr
3319  ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2);
3320  // pred
3321  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3322  return true;
3323}
3324
3325/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3326/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3327/// when they refer to multiple MIOperands inside a single one.
3328bool ARMAsmParser::
3329cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3330                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3331  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3332
3333  // Create a writeback register dummy placeholder.
3334  Inst.addOperand(MCOperand::CreateImm(0));
3335
3336  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3337  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3338  return true;
3339}
3340
3341/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst.
3342/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3343/// when they refer to multiple MIOperands inside a single one.
3344bool ARMAsmParser::
3345cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
3346                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3347  // Create a writeback register dummy placeholder.
3348  Inst.addOperand(MCOperand::CreateImm(0));
3349  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3350  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
3351  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3352  return true;
3353}
3354
3355/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3356/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3357/// when they refer to multiple MIOperands inside a single one.
3358bool ARMAsmParser::
3359cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3360                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3361  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3362
3363  // Create a writeback register dummy placeholder.
3364  Inst.addOperand(MCOperand::CreateImm(0));
3365
3366  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3367  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3368  return true;
3369}
3370
3371/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3372/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3373/// when they refer to multiple MIOperands inside a single one.
3374bool ARMAsmParser::
3375cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3376                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3377  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3378
3379  // Create a writeback register dummy placeholder.
3380  Inst.addOperand(MCOperand::CreateImm(0));
3381
3382  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3383  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3384  return true;
3385}
3386
3388/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
3389/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3390/// when they refer to multiple MIOperands inside a single one.
3391bool ARMAsmParser::
3392cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
3393                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3394  // Create a writeback register dummy placeholder.
3395  Inst.addOperand(MCOperand::CreateImm(0));
3396  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3397  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
3398  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3399  return true;
3400}
3401
3402/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
3403/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3404/// when they refer to multiple MIOperands inside a single one.
3405bool ARMAsmParser::
3406cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
3407                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3408  // Create a writeback register dummy placeholder.
3409  Inst.addOperand(MCOperand::CreateImm(0));
3410  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3411  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
3412  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3413  return true;
3414}
3415
3416/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3417/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3418/// when they refer to multiple MIOperands inside a single one.
3419bool ARMAsmParser::
3420cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3421                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3422  // Create a writeback register dummy placeholder.
3423  Inst.addOperand(MCOperand::CreateImm(0));
3424  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3425  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3426  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3427  return true;
3428}
3429
3430/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
3431/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3432/// when they refer to multiple MIOperands inside a single one.
3433bool ARMAsmParser::
3434cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3435                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3436  // Rt
3437  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3438  // Create a writeback register dummy placeholder.
3439  Inst.addOperand(MCOperand::CreateImm(0));
3440  // addr
3441  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3442  // offset
3443  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3444  // pred
3445  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3446  return true;
3447}
3448
3449/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
3450/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3451/// when they refer to multiple MIOperands inside a single one.
3452bool ARMAsmParser::
3453cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3454                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3455  // Rt
3456  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3457  // Create a writeback register dummy placeholder.
3458  Inst.addOperand(MCOperand::CreateImm(0));
3459  // addr
3460  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3461  // offset
3462  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3463  // pred
3464  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3465  return true;
3466}
3467
3468/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
3469/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3470/// when they refer to multiple MIOperands inside a single one.
3471bool ARMAsmParser::
3472cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
3473                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3474  // Create a writeback register dummy placeholder.
3475  Inst.addOperand(MCOperand::CreateImm(0));
3476  // Rt
3477  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3478  // addr
3479  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3480  // offset
3481  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
3482  // pred
3483  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3484  return true;
3485}
3486
3487/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
3488/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3489/// when they refer to multiple MIOperands inside a single one.
3490bool ARMAsmParser::
3491cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
3492                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3493  // Create a writeback register dummy placeholder.
3494  Inst.addOperand(MCOperand::CreateImm(0));
3495  // Rt
3496  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3497  // addr
3498  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
3499  // offset
3500  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
3501  // pred
3502  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3503  return true;
3504}
3505
3506/// cvtLdrdPre - Convert parsed operands to MCInst.
3507/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3508/// when they refer to multiple MIOperands inside a single one.
3509bool ARMAsmParser::
3510cvtLdrdPre(MCInst &Inst, unsigned Opcode,
3511           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3512  // Rt, Rt2
3513  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3514  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3515  // Create a writeback register dummy placeholder.
3516  Inst.addOperand(MCOperand::CreateImm(0));
3517  // addr
3518  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3519  // pred
3520  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3521  return true;
3522}
3523
3524/// cvtStrdPre - Convert parsed operands to MCInst.
3525/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3526/// when they refer to multiple MIOperands inside a single one.
3527bool ARMAsmParser::
3528cvtStrdPre(MCInst &Inst, unsigned Opcode,
3529           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3530  // Create a writeback register dummy placeholder.
3531  Inst.addOperand(MCOperand::CreateImm(0));
3532  // Rt, Rt2
3533  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3534  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3535  // addr
3536  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
3537  // pred
3538  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3539  return true;
3540}
3541
3542/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
3543/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3544/// when they refer to multiple MIOperands inside a single one.
3545bool ARMAsmParser::
3546cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
3547                         const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3548  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
3549  // Create a writeback register dummy placeholder.
3550  Inst.addOperand(MCOperand::CreateImm(0));
3551  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
3552  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3553  return true;
3554}
3555
3556/// cvtThumbMultiply - Convert parsed operands to MCInst.
3557/// Needed here because the Asm Gen Matcher can't properly handle tied operands
3558/// when they refer to multiple MIOperands inside a single one.
3559bool ARMAsmParser::
3560cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
3561           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3562  // The second source operand must be the same register as the destination
3563  // operand.
3564  if (Operands.size() == 6 &&
3565      (((ARMOperand*)Operands[3])->getReg() !=
3566       ((ARMOperand*)Operands[5])->getReg()) &&
3567      (((ARMOperand*)Operands[3])->getReg() !=
3568       ((ARMOperand*)Operands[4])->getReg())) {
3569    Error(Operands[3]->getStartLoc(),
3570          "destination register must match source register");
3571    return false;
3572  }
3573  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
3574  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
3575  // If we have a three-operand form, make sure to set Rn to be the operand
3576  // that isn't the same as Rd.
3577  unsigned RegOp = 4;
3578  if (Operands.size() == 6 &&
3579      ((ARMOperand*)Operands[4])->getReg() ==
3580        ((ARMOperand*)Operands[3])->getReg())
3581    RegOp = 5;
3582  ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1);
3583  Inst.addOperand(Inst.getOperand(0));
3584  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);
3585
3586  return true;
3587}
3588
3589bool ARMAsmParser::
3590cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
3591              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3592  // Vd
3593  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3594  // Create a writeback register dummy placeholder.
3595  Inst.addOperand(MCOperand::CreateImm(0));
3596  // Vn
3597  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3598  // pred
3599  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3600  return true;
3601}
3602
3603bool ARMAsmParser::
3604cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
3605                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3606  // Vd
3607  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3608  // Create a writeback register dummy placeholder.
3609  Inst.addOperand(MCOperand::CreateImm(0));
3610  // Vn
3611  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3612  // Vm
3613  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3614  // pred
3615  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3616  return true;
3617}
3618
3619bool ARMAsmParser::
3620cvtVSTwbFixed(MCInst &Inst, unsigned Opcode,
3621              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3622  // Create a writeback register dummy placeholder.
3623  Inst.addOperand(MCOperand::CreateImm(0));
3624  // Vn
3625  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3626  // Vt
3627  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3628  // pred
3629  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3630  return true;
3631}
3632
3633bool ARMAsmParser::
3634cvtVSTwbRegister(MCInst &Inst, unsigned Opcode,
3635                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3636  // Create a writeback register dummy placeholder.
3637  Inst.addOperand(MCOperand::CreateImm(0));
3638  // Vn
3639  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
3640  // Vm
3641  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
3642  // Vt
3643  ((ARMOperand*)Operands[3])->addVecListOperands(Inst, 1);
3644  // pred
3645  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
3646  return true;
3647}
3648
3649/// Parse an ARM memory expression. Return false if successful; otherwise emit an
3650/// error and return true.  The first token must be a '[' when called.
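/// Illustrative forms (not exhaustive): '[r0]', '[r1, #-8]', '[r2, #4]!',
/// '[r3, r4, lsl #2]', and the alignment variant '[r0, :128]'.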
3651bool ARMAsmParser::
3652parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3653  SMLoc S, E;
3654  assert(Parser.getTok().is(AsmToken::LBrac) &&
3655         "Token is not a Left Bracket");
3656  S = Parser.getTok().getLoc();
3657  Parser.Lex(); // Eat left bracket token.
3658
3659  const AsmToken &BaseRegTok = Parser.getTok();
3660  int BaseRegNum = tryParseRegister();
3661  if (BaseRegNum == -1)
3662    return Error(BaseRegTok.getLoc(), "register expected");
3663
3664  // The next token must either be a comma or a closing bracket.
3665  const AsmToken &Tok = Parser.getTok();
3666  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
3667    return Error(Tok.getLoc(), "malformed memory operand");
3668
3669  if (Tok.is(AsmToken::RBrac)) {
3670    E = Tok.getLoc();
3671    Parser.Lex(); // Eat right bracket token.
3672
3673    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
3674                                             0, 0, false, S, E));
3675
3676    // If there's a pre-indexing writeback marker, '!', just add it as a token
3677    // operand. It's rather odd, but syntactically valid.
3678    if (Parser.getTok().is(AsmToken::Exclaim)) {
3679      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3680      Parser.Lex(); // Eat the '!'.
3681    }
3682
3683    return false;
3684  }
3685
3686  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
3687  Parser.Lex(); // Eat the comma.
3688
3689  // If we have a ':', it's an alignment specifier.
3690  if (Parser.getTok().is(AsmToken::Colon)) {
3691    Parser.Lex(); // Eat the ':'.
3692    E = Parser.getTok().getLoc();
3693
3694    const MCExpr *Expr;
3695    if (getParser().ParseExpression(Expr))
3696     return true;
3697
3698    // The expression has to be a constant. Memory references with relocations
3699    // don't come through here, as they use the <label> forms of the relevant
3700    // instructions.
3701    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3702    if (!CE)
3703      return Error (E, "constant expression expected");
3704
3705    unsigned Align = 0;
3706    switch (CE->getValue()) {
3707    default:
3708      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
3709    case 64:  Align = 8; break;
3710    case 128: Align = 16; break;
3711    case 256: Align = 32; break;
3712    }
3713
3714    // Now we should have the closing ']'
3715    E = Parser.getTok().getLoc();
3716    if (Parser.getTok().isNot(AsmToken::RBrac))
3717      return Error(E, "']' expected");
3718    Parser.Lex(); // Eat right bracket token.
3719
3720    // Don't worry about range checking the value here. That's handled by
3721    // the is*() predicates.
3722    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
3723                                             ARM_AM::no_shift, 0, Align,
3724                                             false, S, E));
3725
3726    // If there's a pre-indexing writeback marker, '!', just add it as a token
3727    // operand.
3728    if (Parser.getTok().is(AsmToken::Exclaim)) {
3729      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3730      Parser.Lex(); // Eat the '!'.
3731    }
3732
3733    return false;
3734  }
3735
3736  // If we have a '#', it's an immediate offset, else assume it's a register
3737  // offset. Be friendly and also accept a plain integer (without a leading
3738  // hash) for gas compatibility.
3739  if (Parser.getTok().is(AsmToken::Hash) ||
3740      Parser.getTok().is(AsmToken::Integer)) {
3741    if (Parser.getTok().is(AsmToken::Hash))
3742      Parser.Lex(); // Eat the '#'.
3743    E = Parser.getTok().getLoc();
3744
3745    bool isNegative = getParser().getTok().is(AsmToken::Minus);
3746    const MCExpr *Offset;
3747    if (getParser().ParseExpression(Offset))
3748     return true;
3749
3750    // The expression has to be a constant. Memory references with relocations
3751    // don't come through here, as they use the <label> forms of the relevant
3752    // instructions.
3753    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
3754    if (!CE)
3755      return Error (E, "constant expression expected");
3756
3757    // If the constant was #-0, represent it as INT32_MIN.
3758    int32_t Val = CE->getValue();
3759    if (isNegative && Val == 0)
3760      CE = MCConstantExpr::Create(INT32_MIN, getContext());
3761
3762    // Now we should have the closing ']'
3763    E = Parser.getTok().getLoc();
3764    if (Parser.getTok().isNot(AsmToken::RBrac))
3765      return Error(E, "']' expected");
3766    Parser.Lex(); // Eat right bracket token.
3767
3768    // Don't worry about range checking the value here. That's handled by
3769    // the is*() predicates.
3770    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
3771                                             ARM_AM::no_shift, 0, 0,
3772                                             false, S, E));
3773
3774    // If there's a pre-indexing writeback marker, '!', just add it as a token
3775    // operand.
3776    if (Parser.getTok().is(AsmToken::Exclaim)) {
3777      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3778      Parser.Lex(); // Eat the '!'.
3779    }
3780
3781    return false;
3782  }
3783
3784  // The register offset is optionally preceded by a '+' or '-'
3785  bool isNegative = false;
3786  if (Parser.getTok().is(AsmToken::Minus)) {
3787    isNegative = true;
3788    Parser.Lex(); // Eat the '-'.
3789  } else if (Parser.getTok().is(AsmToken::Plus)) {
3790    // Nothing to do.
3791    Parser.Lex(); // Eat the '+'.
3792  }
3793
3794  E = Parser.getTok().getLoc();
3795  int OffsetRegNum = tryParseRegister();
3796  if (OffsetRegNum == -1)
3797    return Error(E, "register expected");
3798
3799  // If there's a shift operator, handle it.
3800  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
3801  unsigned ShiftImm = 0;
3802  if (Parser.getTok().is(AsmToken::Comma)) {
3803    Parser.Lex(); // Eat the ','.
3804    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
3805      return true;
3806  }
3807
3808  // Now we should have the closing ']'
3809  E = Parser.getTok().getLoc();
3810  if (Parser.getTok().isNot(AsmToken::RBrac))
3811    return Error(E, "']' expected");
3812  Parser.Lex(); // Eat right bracket token.
3813
3814  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
3815                                           ShiftType, ShiftImm, 0, isNegative,
3816                                           S, E));
3817
3818  // If there's a pre-indexing writeback marker, '!', just add it as a token
3819  // operand.
3820  if (Parser.getTok().is(AsmToken::Exclaim)) {
3821    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
3822    Parser.Lex(); // Eat the '!'.
3823  }
3824
3825  return false;
3826}
3827
3828/// parseMemRegOffsetShift - one of these two:
3829///   ( lsl | lsr | asr | ror ) , # shift_amount
3830///   rrx
3831/// Return false if it successfully parses a shift; otherwise it returns true.
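/// For example (illustrative only): the 'lsl #2' in 'ldr r0, [r1, r2, lsl #2]'
/// or the trailing 'rrx' in 'ldr r0, [r1, r2, rrx]'.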
3832bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St,
3833                                          unsigned &Amount) {
3834  SMLoc Loc = Parser.getTok().getLoc();
3835  const AsmToken &Tok = Parser.getTok();
3836  if (Tok.isNot(AsmToken::Identifier))
3837    return true;
3838  StringRef ShiftName = Tok.getString();
3839  if (ShiftName == "lsl" || ShiftName == "LSL")
3840    St = ARM_AM::lsl;
3841  else if (ShiftName == "lsr" || ShiftName == "LSR")
3842    St = ARM_AM::lsr;
3843  else if (ShiftName == "asr" || ShiftName == "ASR")
3844    St = ARM_AM::asr;
3845  else if (ShiftName == "ror" || ShiftName == "ROR")
3846    St = ARM_AM::ror;
3847  else if (ShiftName == "rrx" || ShiftName == "RRX")
3848    St = ARM_AM::rrx;
3849  else
3850    return Error(Loc, "illegal shift operator");
3851  Parser.Lex(); // Eat shift type token.
3852
3853  // rrx stands alone.
3854  Amount = 0;
3855  if (St != ARM_AM::rrx) {
3856    Loc = Parser.getTok().getLoc();
3857    // A '#' and a shift amount.
3858    const AsmToken &HashTok = Parser.getTok();
3859    if (HashTok.isNot(AsmToken::Hash))
3860      return Error(HashTok.getLoc(), "'#' expected");
3861    Parser.Lex(); // Eat hash token.
3862
3863    const MCExpr *Expr;
3864    if (getParser().ParseExpression(Expr))
3865      return true;
3866    // Range check the immediate.
3867    // lsl, ror: 0 <= imm <= 31
3868    // lsr, asr: 0 <= imm <= 32
3869    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
3870    if (!CE)
3871      return Error(Loc, "shift amount must be an immediate");
3872    int64_t Imm = CE->getValue();
3873    if (Imm < 0 ||
3874        ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) ||
3875        ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32))
3876      return Error(Loc, "immediate shift value out of range");
3877    Amount = Imm;
3878  }
3879
3880  return false;
3881}
3882
3883/// parseFPImm - A floating point immediate expression operand.
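/// For example (illustrative only): 'vmov.f32 s0, #1.0' or 'vmov.f64 d1, #-2.5';
/// an already-encoded 8-bit value such as '#112' is also accepted below.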
3884ARMAsmParser::OperandMatchResultTy ARMAsmParser::
3885parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
3886  SMLoc S = Parser.getTok().getLoc();
3887
3888  if (Parser.getTok().isNot(AsmToken::Hash))
3889    return MatchOperand_NoMatch;
3890
3891  // Disambiguate the VMOV forms that can accept an FP immediate.
3892  // vmov.f32 <sreg>, #imm
3893  // vmov.f64 <dreg>, #imm
3894  // vmov.f32 <dreg>, #imm  @ vector f32x2
3895  // vmov.f32 <qreg>, #imm  @ vector f32x4
3896  //
3897  // There are also the NEON VMOV instructions which expect an
3898  // integer constant. Make sure we don't try to parse an FPImm
3899  // for these:
3900  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
3901  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
3902  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
3903                           TyOp->getToken() != ".f64"))
3904    return MatchOperand_NoMatch;
3905
3906  Parser.Lex(); // Eat the '#'.
3907
3908  // Handle negation, as that still comes through as a separate token.
3909  bool isNegative = false;
3910  if (Parser.getTok().is(AsmToken::Minus)) {
3911    isNegative = true;
3912    Parser.Lex();
3913  }
3914  const AsmToken &Tok = Parser.getTok();
3915  if (Tok.is(AsmToken::Real)) {
3916    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
3917    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
3918    // If we had a '-' in front, toggle the sign bit.
3919    IntVal ^= (uint64_t)isNegative << 63;
3920    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
3921    Parser.Lex(); // Eat the token.
3922    if (Val == -1) {
3923      TokError("floating point value out of range");
3924      return MatchOperand_ParseFail;
3925    }
3926    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
3927    return MatchOperand_Success;
3928  }
3929  if (Tok.is(AsmToken::Integer)) {
3930    int64_t Val = Tok.getIntVal();
3931    Parser.Lex(); // Eat the token.
3932    if (Val > 255 || Val < 0) {
3933      TokError("encoded floating point value out of range");
3934      return MatchOperand_ParseFail;
3935    }
3936    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
3937    return MatchOperand_Success;
3938  }
3939
3940  TokError("invalid floating point immediate");
3941  return MatchOperand_ParseFail;
3942}
3943/// Parse an ARM instruction operand.  For now this parses the operand regardless
3944/// of the mnemonic.
3945bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
3946                                StringRef Mnemonic) {
3947  SMLoc S, E;
3948
3949  // Check if the current operand has a custom associated parser, if so, try to
3950  // custom parse the operand, or fallback to the general approach.
3951  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
3952  if (ResTy == MatchOperand_Success)
3953    return false;
3954  // If there wasn't a custom match, try the generic matcher below. Otherwise,
3955  // there was a match, but an error occurred, in which case, just return that
3956  // the operand parsing failed.
3957  if (ResTy == MatchOperand_ParseFail)
3958    return true;
3959
3960  switch (getLexer().getKind()) {
3961  default:
3962    Error(Parser.getTok().getLoc(), "unexpected token in operand");
3963    return true;
3964  case AsmToken::Identifier: {
3965    // If this is VMRS, check for the apsr_nzcv operand.
3966    if (!tryParseRegisterWithWriteBack(Operands))
3967      return false;
3968    int Res = tryParseShiftRegister(Operands);
3969    if (Res == 0) // success
3970      return false;
3971    else if (Res == -1) // irrecoverable error
3972      return true;
3973    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
3974      S = Parser.getTok().getLoc();
3975      Parser.Lex();
3976      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
3977      return false;
3978    }
3979
3980    // Fall through for the Identifier case that is not a register or a
3981    // special name.
3982  }
3983  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
3984  case AsmToken::Integer: // things like 1f and 2b as branch targets
3985  case AsmToken::String:  // quoted label names.
3986  case AsmToken::Dot: {   // . as a branch target
3987    // This was not a register so parse other operands that start with an
3988    // identifier (like labels) as expressions and create them as immediates.
3989    const MCExpr *IdVal;
3990    S = Parser.getTok().getLoc();
3991    if (getParser().ParseExpression(IdVal))
3992      return true;
3993    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
3994    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
3995    return false;
3996  }
3997  case AsmToken::LBrac:
3998    return parseMemory(Operands);
3999  case AsmToken::LCurly:
4000    return parseRegisterList(Operands);
4001  case AsmToken::Hash: {
4002    // #42 -> immediate.
4003    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
4004    S = Parser.getTok().getLoc();
4005    Parser.Lex();
4006    bool isNegative = Parser.getTok().is(AsmToken::Minus);
4007    const MCExpr *ImmVal;
4008    if (getParser().ParseExpression(ImmVal))
4009      return true;
4010    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
4011    if (CE) {
4012      int32_t Val = CE->getValue();
4013      if (isNegative && Val == 0)
4014        ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
4015    }
4016    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4017    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
4018    return false;
4019  }
4020  case AsmToken::Colon: {
4021    // ":lower16:" and ":upper16:" expression prefixes
4022    // FIXME: Check it's an expression prefix,
4023    // e.g. (FOO - :lower16:BAR) isn't legal.
4024    ARMMCExpr::VariantKind RefKind;
4025    if (parsePrefix(RefKind))
4026      return true;
4027
4028    const MCExpr *SubExprVal;
4029    if (getParser().ParseExpression(SubExprVal))
4030      return true;
4031
4032    const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal,
4033                                                   getContext());
4034    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
4035    Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E));
4036    return false;
4037  }
4038  }
4039}
4040
4041// parsePrefix - Parse an ARM 16-bit relocation expression prefix, i.e.
4042//  :lower16: and :upper16:.
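// For example (with a hypothetical symbol 'sym'):
//   movw r0, #:lower16:sym
//   movt r0, #:upper16:sym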
4043bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
4044  RefKind = ARMMCExpr::VK_ARM_None;
4045
4046  // :lower16: and :upper16: modifiers
4047  assert(getLexer().is(AsmToken::Colon) && "expected a :");
4048  Parser.Lex(); // Eat ':'
4049
4050  if (getLexer().isNot(AsmToken::Identifier)) {
4051    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
4052    return true;
4053  }
4054
4055  StringRef IDVal = Parser.getTok().getIdentifier();
4056  if (IDVal == "lower16") {
4057    RefKind = ARMMCExpr::VK_ARM_LO16;
4058  } else if (IDVal == "upper16") {
4059    RefKind = ARMMCExpr::VK_ARM_HI16;
4060  } else {
4061    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
4062    return true;
4063  }
4064  Parser.Lex();
4065
4066  if (getLexer().isNot(AsmToken::Colon)) {
4067    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
4068    return true;
4069  }
4070  Parser.Lex(); // Eat the last ':'
4071  return false;
4072}
4073
4074/// \brief Given a mnemonic, split out possible predication code and carry
4075/// setting letters to form a canonical mnemonic and flags.
4076//
4077// FIXME: Would be nice to autogen this.
4078// FIXME: This is a bit of a maze of special cases.
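// For example (illustrative only): "addseq" splits into "add" + carry-set 's' +
// predicate EQ, "cpsie" into "cps" + imod IE, and "iteet" into "it" with IT
// mask "eet".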
4079StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
4080                                      unsigned &PredicationCode,
4081                                      bool &CarrySetting,
4082                                      unsigned &ProcessorIMod,
4083                                      StringRef &ITMask) {
4084  PredicationCode = ARMCC::AL;
4085  CarrySetting = false;
4086  ProcessorIMod = 0;
4087
4088  // Ignore some mnemonics we know aren't predicated forms.
4089  //
4090  // FIXME: Would be nice to autogen this.
4091  if ((Mnemonic == "movs" && isThumb()) ||
4092      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
4093      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
4094      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
4095      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
4096      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
4097      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
4098      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
4099    return Mnemonic;
4100
4101  // First, split out any predication code. Ignore mnemonics we know aren't
4102  // predicated but do have a carry-set and so weren't caught above.
4103  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
4104      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
4105      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
4106      Mnemonic != "sbcs" && Mnemonic != "rscs") {
4107    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
4108      .Case("eq", ARMCC::EQ)
4109      .Case("ne", ARMCC::NE)
4110      .Case("hs", ARMCC::HS)
4111      .Case("cs", ARMCC::HS)
4112      .Case("lo", ARMCC::LO)
4113      .Case("cc", ARMCC::LO)
4114      .Case("mi", ARMCC::MI)
4115      .Case("pl", ARMCC::PL)
4116      .Case("vs", ARMCC::VS)
4117      .Case("vc", ARMCC::VC)
4118      .Case("hi", ARMCC::HI)
4119      .Case("ls", ARMCC::LS)
4120      .Case("ge", ARMCC::GE)
4121      .Case("lt", ARMCC::LT)
4122      .Case("gt", ARMCC::GT)
4123      .Case("le", ARMCC::LE)
4124      .Case("al", ARMCC::AL)
4125      .Default(~0U);
4126    if (CC != ~0U) {
4127      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
4128      PredicationCode = CC;
4129    }
4130  }
4131
4132  // Next, determine if we have a carry setting bit. We explicitly ignore all
4133  // the instructions we know end in 's'.
4134  if (Mnemonic.endswith("s") &&
4135      !(Mnemonic == "cps" || Mnemonic == "mls" ||
4136        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
4137        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
4138        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
4139        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
4140        (Mnemonic == "movs" && isThumb()))) {
4141    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
4142    CarrySetting = true;
4143  }
4144
4145  // The "cps" instruction can have an interrupt mode operand which is glued into
4146  // the mnemonic. Check if this is the case, split it off, and parse the imod operand.
4147  if (Mnemonic.startswith("cps")) {
4148    // Split out any imod code.
4149    unsigned IMod =
4150      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
4151      .Case("ie", ARM_PROC::IE)
4152      .Case("id", ARM_PROC::ID)
4153      .Default(~0U);
4154    if (IMod != ~0U) {
4155      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
4156      ProcessorIMod = IMod;
4157    }
4158  }
4159
4160  // The "it" instruction has the condition mask on the end of the mnemonic.
4161  if (Mnemonic.startswith("it")) {
4162    ITMask = Mnemonic.slice(2, Mnemonic.size());
4163    Mnemonic = Mnemonic.slice(0, 2);
4164  }
4165
4166  return Mnemonic;
4167}
4168
4169/// \brief Given a canonical mnemonic, determine if the instruction ever allows
4170/// inclusion of carry set or predication code operands.
4171//
4172// FIXME: It would be nice to autogen this.
4173void ARMAsmParser::
4174getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
4175                      bool &CanAcceptPredicationCode) {
4176  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
4177      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
4178      Mnemonic == "add" || Mnemonic == "adc" ||
4179      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
4180      Mnemonic == "orr" || Mnemonic == "mvn" ||
4181      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
4182      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
4183      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
4184                      Mnemonic == "mla" || Mnemonic == "smlal" ||
4185                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
4186    CanAcceptCarrySet = true;
4187  } else
4188    CanAcceptCarrySet = false;
4189
4190  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
4191      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
4192      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
4193      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
4194      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
4195      (Mnemonic == "clrex" && !isThumb()) ||
4196      (Mnemonic == "nop" && isThumbOne()) ||
4197      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
4198        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
4199        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
4200      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
4201       !isThumb()) ||
4202      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
4203    CanAcceptPredicationCode = false;
4204  } else
4205    CanAcceptPredicationCode = true;
4206
4207  if (isThumb()) {
4208    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
4209        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
4210      CanAcceptPredicationCode = false;
4211  }
4212}
4213
4214bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
4215                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4216  // FIXME: This is all horribly hacky. We really need a better way to deal
4217  // with optional operands like this in the matcher table.
4218
4219  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
4220  // another does not. Specifically, the MOVW instruction does not. So we
4221  // special case it here and remove the defaulted (non-setting) cc_out
4222  // operand if that's the instruction we're trying to match.
4223  //
4224  // We do this as post-processing of the explicit operands rather than just
4225  // conditionally adding the cc_out in the first place because we need
4226  // to check the type of the parsed immediate operand.
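  // For example (illustrative only): ARM-mode 'mov r1, #0x1234'. 0x1234 is not
  // a valid modified-immediate but does fit imm0_65535, so only the MOVW form
  // can match and the default cc_out operand must be removed.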
4227  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
4228      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
4229      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
4230      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4231    return true;
4232
4233  // Register-register 'add' for thumb does not have a cc_out operand
4234  // when there are only two register operands.
4235  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
4236      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4237      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4238      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4239    return true;
4240  // Register-register 'add' for thumb does not have a cc_out operand
4241  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
4242  // have to check the immediate range here since Thumb2 has a variant
4243  // that can handle a different range and has a cc_out operand.
4244  if (((isThumb() && Mnemonic == "add") ||
4245       (isThumbTwo() && Mnemonic == "sub")) &&
4246      Operands.size() == 6 &&
4247      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4248      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4249      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
4250      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4251      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
4252       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
4253    return true;
4254  // For Thumb2, add/sub immediate does not have a cc_out operand for the
4255  // imm0_4095 variant. That's the least-preferred variant when
4256  // selecting via the generic "add" mnemonic, so to know that we
4257  // should remove the cc_out operand, we have to explicitly check that
4258  // it's not one of the other variants. Ugh.
4259  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
4260      Operands.size() == 6 &&
4261      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4262      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4263      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4264    // Nest conditions rather than one big 'if' statement for readability.
4265    //
4266    // If either register is a high reg, it's either one of the SP
4267    // variants (handled above) or a 32-bit encoding, so we just
4268    // check against T3.
4269    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4270         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
4271        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
4272      return false;
4273    // If both registers are low, we're in an IT block, and the immediate is
4274    // in range, we should use encoding T1 instead, which has a cc_out.
4275    if (inITBlock() &&
4276        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
4277        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
4278        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
4279      return false;
4280
4281    // Otherwise, we use encoding T4, which does not have a cc_out
4282    // operand.
4283    return true;
4284  }
4285
4286  // The thumb2 multiply instruction doesn't have a CCOut register, so
4287  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
4288  // use the 16-bit encoding or not.
4289  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
4290      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4291      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4292      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4293      static_cast<ARMOperand*>(Operands[5])->isReg() &&
4294      // If the registers aren't low regs, the destination reg isn't the
4295      // same as one of the source regs, or the cc_out operand is zero
4296      // outside of an IT block, we have to use the 32-bit encoding, so
4297      // remove the cc_out operand.
4298      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4299       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4300       !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) ||
4301       !inITBlock() ||
4302       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
4303        static_cast<ARMOperand*>(Operands[5])->getReg() &&
4304        static_cast<ARMOperand*>(Operands[3])->getReg() !=
4305        static_cast<ARMOperand*>(Operands[4])->getReg())))
4306    return true;
4307
4308  // Also check the 'mul' syntax variant that doesn't specify an explicit
4309  // destination register.
4310  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 &&
4311      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
4312      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4313      static_cast<ARMOperand*>(Operands[4])->isReg() &&
4314      // If the registers aren't low regs or the cc_out operand is zero
4315      // outside of an IT block, we have to use the 32-bit encoding, so
4316      // remove the cc_out operand.
4317      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
4318       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
4319       !inITBlock()))
4320    return true;
4321
4324  // Register-register 'add/sub' for thumb does not have a cc_out operand
4325  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
4326  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
4327  // right, this will result in better diagnostics (which operand is off)
4328  // anyway.
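  // e.g. 'add sp, #16' or 'sub sp, sp, #8'.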
4329  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
4330      (Operands.size() == 5 || Operands.size() == 6) &&
4331      static_cast<ARMOperand*>(Operands[3])->isReg() &&
4332      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
4333      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
4334    return true;
4335
4336  return false;
4337}
4338
4339static bool isDataTypeToken(StringRef Tok) {
4340  return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" ||
4341    Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" ||
4342    Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" ||
4343    Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" ||
4344    Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" ||
4345    Tok == ".f" || Tok == ".d";
4346}
4347
4348// FIXME: This bit should probably be handled via an explicit match class
4349// in the .td files that matches the suffix instead of having it be
4350// a literal string token the way it is now.
4351static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
4352  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
4353}
4354
4355/// Parse an arm instruction mnemonic followed by its operands.
4356bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
4357                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4358  // Create the leading tokens for the mnemonic, split by '.' characters.
4359  size_t Start = 0, Next = Name.find('.');
4360  StringRef Mnemonic = Name.slice(Start, Next);
4361
4362  // Split out the predication code and carry setting flag from the mnemonic.
4363  unsigned PredicationCode;
4364  unsigned ProcessorIMod;
4365  bool CarrySetting;
4366  StringRef ITMask;
4367  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
4368                           ProcessorIMod, ITMask);
4369
4370  // In Thumb1, only the branch (B) instruction can be predicated.
4371  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
4372    Parser.EatToEndOfStatement();
4373    return Error(NameLoc, "conditional execution not supported in Thumb1");
4374  }
4375
4376  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));
4377
4378  // Handle the IT instruction ITMask. Convert it to a bitmask. This
4379  // is the mask as it will be for the IT encoding if the conditional
4380  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
4381  // where the conditional bit0 is zero, the instruction post-processing
4382  // will adjust the mask accordingly.
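  // e.g. for 'ittet', the 'tet' suffix becomes the mask 0b1011 here; the
  // t2IT handling in processInstruction() flips bits if the condition's
  // bit0 is zero.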
4383  if (Mnemonic == "it") {
4384    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
4385    if (ITMask.size() > 3) {
4386      Parser.EatToEndOfStatement();
4387      return Error(Loc, "too many conditions on IT instruction");
4388    }
4389    unsigned Mask = 8;
4390    for (unsigned i = ITMask.size(); i != 0; --i) {
4391      char pos = ITMask[i - 1];
4392      if (pos != 't' && pos != 'e') {
4393        Parser.EatToEndOfStatement();
4394        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
4395      }
4396      Mask >>= 1;
4397      if (pos == 't')
4398        Mask |= 8;
4399    }
4400    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
4401  }
4402
4403  // FIXME: This is all a pretty gross hack. We should automatically handle
4404  // optional operands like this via tblgen.
4405
4406  // Next, add the CCOut and ConditionCode operands, if needed.
4407  //
4408  // For mnemonics which can ever incorporate a carry setting bit or predication
4409  // code, our matching model involves us always generating CCOut and
4410  // ConditionCode operands to match the mnemonic "as written" and then we let
4411  // the matcher deal with finding the right instruction or generating an
4412  // appropriate error.
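  // e.g. 'addseq r0, r1, #4' is parsed as the token 'add' plus a CPSR cc_out
  // operand and an 'eq' condition code operand.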
4413  bool CanAcceptCarrySet, CanAcceptPredicationCode;
4414  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);
4415
4416  // If we had a carry-set on an instruction that can't do that, issue an
4417  // error.
4418  if (!CanAcceptCarrySet && CarrySetting) {
4419    Parser.EatToEndOfStatement();
4420    return Error(NameLoc, "instruction '" + Mnemonic +
4421                 "' can not set flags, but 's' suffix specified");
4422  }
4423  // If we had a predication code on an instruction that can't do that, issue an
4424  // error.
4425  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
4426    Parser.EatToEndOfStatement();
4427    return Error(NameLoc, "instruction '" + Mnemonic +
4428                 "' is not predicable, but condition code specified");
4429  }
4430
4431  // Add the carry setting operand, if necessary.
4432  if (CanAcceptCarrySet) {
4433    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
4434    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
4435                                               Loc));
4436  }
4437
4438  // Add the predication code operand, if necessary.
4439  if (CanAcceptPredicationCode) {
4440    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
4441                                      CarrySetting);
4442    Operands.push_back(ARMOperand::CreateCondCode(
4443                         ARMCC::CondCodes(PredicationCode), Loc));
4444  }
4445
4446  // Add the processor imod operand, if necessary.
4447  if (ProcessorIMod) {
4448    Operands.push_back(ARMOperand::CreateImm(
4449          MCConstantExpr::Create(ProcessorIMod, getContext()),
4450                                 NameLoc, NameLoc));
4451  }
4452
4453  // Add the remaining tokens in the mnemonic.
4454  while (Next != StringRef::npos) {
4455    Start = Next;
4456    Next = Name.find('.', Start + 1);
4457    StringRef ExtraToken = Name.slice(Start, Next);
4458
4459    // Some NEON instructions have an optional datatype suffix that is
4460    // completely ignored. Check for that.
4461    if (isDataTypeToken(ExtraToken) &&
4462        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
4463      continue;
4464
4465    if (ExtraToken != ".n") {
4466      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
4467      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
4468    }
4469  }
4470
4471  // Read the remaining operands.
4472  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4473    // Read the first operand.
4474    if (parseOperand(Operands, Mnemonic)) {
4475      Parser.EatToEndOfStatement();
4476      return true;
4477    }
4478
4479    while (getLexer().is(AsmToken::Comma)) {
4480      Parser.Lex();  // Eat the comma.
4481
4482      // Parse and remember the operand.
4483      if (parseOperand(Operands, Mnemonic)) {
4484        Parser.EatToEndOfStatement();
4485        return true;
4486      }
4487    }
4488  }
4489
4490  if (getLexer().isNot(AsmToken::EndOfStatement)) {
4491    SMLoc Loc = getLexer().getLoc();
4492    Parser.EatToEndOfStatement();
4493    return Error(Loc, "unexpected token in argument list");
4494  }
4495
4496  Parser.Lex(); // Consume the EndOfStatement
4497
4498  // Some instructions, mostly Thumb, have forms for the same mnemonic that
4499  // do and don't have a cc_out optional-def operand. With some spot-checks
4500  // of the operand list, we can figure out which variant we're trying to
4501  // parse and adjust accordingly before actually matching. We shouldn't ever
4502  // try to remove a cc_out operand that was explicitly set on the
4503  // mnemonic, of course (CarrySetting == true). Reason #317 why the
4504  // table driven matcher doesn't fit well with the ARM instruction set.
4505  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
4506    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4507    Operands.erase(Operands.begin() + 1);
4508    delete Op;
4509  }
4510
4511  // ARM mode 'blx' need special handling, as the register operand version
4512  // is predicable, but the label operand version is not. So, we can't rely
4513  // on the Mnemonic based checking to correctly figure out when to put
4514  // a k_CondCode operand in the list. If we're trying to match the label
4515  // version, remove the k_CondCode operand here.
4516  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
4517      static_cast<ARMOperand*>(Operands[2])->isImm()) {
4518    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
4519    Operands.erase(Operands.begin() + 1);
4520    delete Op;
4521  }
4522
4523  // The vector-compare-to-zero instructions have a literal token "#0" at
4524  // the end that comes to here as an immediate operand. Convert it to a
4525  // token to play nicely with the matcher.
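  // e.g. 'vceq.i32 d0, d1, #0'.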
4526  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
4527      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
4528      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4529    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4530    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4531    if (CE && CE->getValue() == 0) {
4532      Operands.erase(Operands.begin() + 5);
4533      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4534      delete Op;
4535    }
4536  }
4537  // VCMP{E} does the same thing, but with a different operand count.
4538  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
4539      static_cast<ARMOperand*>(Operands[4])->isImm()) {
4540    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
4541    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4542    if (CE && CE->getValue() == 0) {
4543      Operands.erase(Operands.begin() + 4);
4544      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4545      delete Op;
4546    }
4547  }
4548  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
4549  // end. Convert it to a token here.
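  // e.g. 'rsbs r0, r1, #0'.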
4550  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
4551      static_cast<ARMOperand*>(Operands[5])->isImm()) {
4552    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
4553    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
4554    if (CE && CE->getValue() == 0) {
4555      Operands.erase(Operands.begin() + 5);
4556      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
4557      delete Op;
4558    }
4559  }
4560
4561  return false;
4562}
4563
4564// Validate context-sensitive operand constraints.
4565
4566// Return 'true' if the register list contains non-low GPR registers,
4567// 'false' otherwise. If Reg is in the register list, set 'containsReg'
4568// to true.
4569static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
4570                                 unsigned HiReg, bool &containsReg) {
4571  containsReg = false;
4572  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4573    unsigned OpReg = Inst.getOperand(i).getReg();
4574    if (OpReg == Reg)
4575      containsReg = true;
4576    // Anything other than a low register isn't legal here.
4577    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
4578      return true;
4579  }
4580  return false;
4581}
4582
4583// Check if the specified register is in the register list of the inst,
4584// starting at the indicated operand number.
4585static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
4586  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
4587    unsigned OpReg = Inst.getOperand(i).getReg();
4588    if (OpReg == Reg)
4589      return true;
4590  }
4591  return false;
4592}
4593
4594// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
4595// the ARMInsts array) instead. Getting that here requires awkward
4596// API changes, though. Better way?
4597namespace llvm {
4598extern const MCInstrDesc ARMInsts[];
4599}
4600static const MCInstrDesc &getInstDesc(unsigned Opcode) {
4601  return ARMInsts[Opcode];
4602}
4603
4604// FIXME: We would really like to be able to tablegen'erate this.
4605bool ARMAsmParser::
4606validateInstruction(MCInst &Inst,
4607                    const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4608  const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode());
4609  SMLoc Loc = Operands[0]->getStartLoc();
4610  // Check the IT block state first.
4611  // NOTE: In Thumb mode, the BKPT instruction has the interesting property of
4612  // being allowed in IT blocks, but not being predicable.  It just always
4613  // executes.
4614  if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) {
4615    unsigned bit = 1;
4616    if (ITState.FirstCond)
4617      ITState.FirstCond = false;
4618    else
4619      bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1;
4620    // The instruction must be predicable.
4621    if (!MCID.isPredicable())
4622      return Error(Loc, "instructions in IT block must be predicable");
4623    unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm();
4624    unsigned ITCond = bit ? ITState.Cond :
4625      ARMCC::getOppositeCondition(ITState.Cond);
4626    if (Cond != ITCond) {
4627      // Find the condition code Operand to get its SMLoc information.
4628      SMLoc CondLoc;
4629      for (unsigned i = 1; i < Operands.size(); ++i)
4630        if (static_cast<ARMOperand*>(Operands[i])->isCondCode())
4631          CondLoc = Operands[i]->getStartLoc();
4632      return Error(CondLoc, "incorrect condition in IT block; got '" +
4633                   StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) +
4634                   "', but expected '" +
4635                   ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'");
4636    }
4637  // Check for non-'al' condition codes outside of the IT block.
4638  } else if (isThumbTwo() && MCID.isPredicable() &&
4639             Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() !=
4640             ARMCC::AL && Inst.getOpcode() != ARM::tB &&
4641             Inst.getOpcode() != ARM::t2B)
4642    return Error(Loc, "predicated instructions must be in IT block");
4643
4644  switch (Inst.getOpcode()) {
4645  case ARM::LDRD:
4646  case ARM::LDRD_PRE:
4647  case ARM::LDRD_POST:
4648  case ARM::LDREXD: {
4649    // Rt2 must be Rt + 1.
4650    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4651    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4652    if (Rt2 != Rt + 1)
4653      return Error(Operands[3]->getStartLoc(),
4654                   "destination operands must be sequential");
4655    return false;
4656  }
4657  case ARM::STRD: {
4658    // Rt2 must be Rt + 1.
4659    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg());
4660    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4661    if (Rt2 != Rt + 1)
4662      return Error(Operands[3]->getStartLoc(),
4663                   "source operands must be sequential");
4664    return false;
4665  }
4666  case ARM::STRD_PRE:
4667  case ARM::STRD_POST:
4668  case ARM::STREXD: {
4669    // Rt2 must be Rt + 1.
4670    unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg());
4671    unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg());
4672    if (Rt2 != Rt + 1)
4673      return Error(Operands[3]->getStartLoc(),
4674                   "source operands must be sequential");
4675    return false;
4676  }
4677  case ARM::SBFX:
4678  case ARM::UBFX: {
4679    // width must be in range [1, 32-lsb]
4680    unsigned lsb = Inst.getOperand(2).getImm();
4681    unsigned widthm1 = Inst.getOperand(3).getImm();
4682    if (widthm1 >= 32 - lsb)
4683      return Error(Operands[5]->getStartLoc(),
4684                   "bitfield width must be in range [1,32-lsb]");
4685    return false;
4686  }
4687  case ARM::tLDMIA: {
4688    // If we're parsing Thumb2, the .w variant is available and handles
4689    // most cases that are normally illegal for a Thumb1 LDM
4690    // instruction. We'll make the transformation in processInstruction()
4691    // if necessary.
4692    //
4693    // Thumb LDM instructions are writeback iff the base register is not
4694    // in the register list.
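    // e.g. 'ldm r0!, {r1, r2}' has writeback; 'ldm r0, {r0, r1}' does not.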
4695    unsigned Rn = Inst.getOperand(0).getReg();
4696    bool hasWritebackToken =
4697      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
4698       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
4699    bool listContainsBase;
4700    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo())
4701      return Error(Operands[3 + hasWritebackToken]->getStartLoc(),
4702                   "registers must be in range r0-r7");
4703    // If we should have writeback, then there should be a '!' token.
4704    if (!listContainsBase && !hasWritebackToken && !isThumbTwo())
4705      return Error(Operands[2]->getStartLoc(),
4706                   "writeback operator '!' expected");
4707    // If we should not have writeback, there must not be a '!'. This is
4708    // true even for the 32-bit wide encodings.
4709    if (listContainsBase && hasWritebackToken)
4710      return Error(Operands[3]->getStartLoc(),
4711                   "writeback operator '!' not allowed when base register "
4712                   "in register list");
4713
4714    break;
4715  }
4716  case ARM::t2LDMIA_UPD: {
4717    if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg()))
4718      return Error(Operands[4]->getStartLoc(),
4719                   "writeback operator '!' not allowed when base register "
4720                   "in register list");
4721    break;
4722  }
4723  // Like for ldm/stm, push and pop have hi-reg handling versions in Thumb2,
4724  // so only issue a diagnostic for Thumb1. The instructions will be
4725  // switched to the t2 encodings in processInstruction() if necessary.
4726  case ARM::tPOP: {
4727    bool listContainsBase;
4728    if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) &&
4729        !isThumbTwo())
4730      return Error(Operands[2]->getStartLoc(),
4731                   "registers must be in range r0-r7 or pc");
4732    break;
4733  }
4734  case ARM::tPUSH: {
4735    bool listContainsBase;
4736    if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) &&
4737        !isThumbTwo())
4738      return Error(Operands[2]->getStartLoc(),
4739                   "registers must be in range r0-r7 or lr");
4740    break;
4741  }
4742  case ARM::tSTMIA_UPD: {
4743    bool listContainsBase;
4744    if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo())
4745      return Error(Operands[4]->getStartLoc(),
4746                   "registers must be in range r0-r7");
4747    break;
4748  }
4749  }
4750
4751  return false;
4752}
4753
4754static unsigned getRealVLDNOpcode(unsigned Opc) {
4755  switch(Opc) {
4756  default: llvm_unreachable("unexpected opcode!");
4757  case ARM::VLD1LNd8asm:   return ARM::VLD1LNd8;
4758  case ARM::VLD1LNdf32asm: return ARM::VLD1LNd32;
4759  }
4760}
4761
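// processInstruction - Perform target-specific post-parse fixups on a matched
// instruction, e.g. expanding complex aliases or switching between 16-bit and
// 32-bit Thumb encodings. Returns true if the instruction was changed; the
// caller loops until no further changes are made.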
4762bool ARMAsmParser::
4763processInstruction(MCInst &Inst,
4764                   const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
4765  switch (Inst.getOpcode()) {
4766  // Handle NEON VLD1 complex aliases.
4767  case ARM::VLD1LNd8asm:
4768  case ARM::VLD1LNdf32asm: {
4769    MCInst TmpInst;
4770    // Shuffle the operands around so the lane index operand is in the
4771    // right place.
4772    TmpInst.setOpcode(getRealVLDNOpcode(Inst.getOpcode()));
4773    TmpInst.addOperand(Inst.getOperand(0)); // Vd
4774    TmpInst.addOperand(Inst.getOperand(2)); // Rn
4775    TmpInst.addOperand(Inst.getOperand(3)); // alignment
4776    TmpInst.addOperand(Inst.getOperand(0)); // Tied operand src (== Vd)
4777    TmpInst.addOperand(Inst.getOperand(1)); // lane
4778    TmpInst.addOperand(Inst.getOperand(4)); // CondCode
4779    TmpInst.addOperand(Inst.getOperand(5));
4780    Inst = TmpInst;
4781    return true;
4782  }
4783  // Handle the MOV complex aliases.
4784  case ARM::ASRr:
4785  case ARM::LSRr:
4786  case ARM::LSLr:
4787  case ARM::RORr: {
4788    ARM_AM::ShiftOpc ShiftTy;
4789    switch(Inst.getOpcode()) {
4790    default: llvm_unreachable("unexpected opcode!");
4791    case ARM::ASRr: ShiftTy = ARM_AM::asr; break;
4792    case ARM::LSRr: ShiftTy = ARM_AM::lsr; break;
4793    case ARM::LSLr: ShiftTy = ARM_AM::lsl; break;
4794    case ARM::RORr: ShiftTy = ARM_AM::ror; break;
4795    }
4796    // A shift by zero is a plain MOVr, not a MOVsi.
4797    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0);
4798    MCInst TmpInst;
4799    TmpInst.setOpcode(ARM::MOVsr);
4800    TmpInst.addOperand(Inst.getOperand(0)); // Rd
4801    TmpInst.addOperand(Inst.getOperand(1)); // Rn
4802    TmpInst.addOperand(Inst.getOperand(2)); // Rm
4803    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
4804    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
4805    TmpInst.addOperand(Inst.getOperand(4));
4806    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
4807    Inst = TmpInst;
4808    return true;
4809  }
4810  case ARM::ASRi:
4811  case ARM::LSRi:
4812  case ARM::LSLi:
4813  case ARM::RORi: {
4814    ARM_AM::ShiftOpc ShiftTy;
4815    switch(Inst.getOpcode()) {
4816    default: llvm_unreachable("unexpected opcode!");
4817    case ARM::ASRi: ShiftTy = ARM_AM::asr; break;
4818    case ARM::LSRi: ShiftTy = ARM_AM::lsr; break;
4819    case ARM::LSLi: ShiftTy = ARM_AM::lsl; break;
4820    case ARM::RORi: ShiftTy = ARM_AM::ror; break;
4821    }
4822    // A shift by zero is a plain MOVr, not a MOVsi.
4823    unsigned Amt = Inst.getOperand(2).getImm();
4824    unsigned Opc = Amt == 0 ? ARM::MOVr : ARM::MOVsi;
4825    unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt);
4826    MCInst TmpInst;
4827    TmpInst.setOpcode(Opc);
4828    TmpInst.addOperand(Inst.getOperand(0)); // Rd
4829    TmpInst.addOperand(Inst.getOperand(1)); // Rn
4830    if (Opc == ARM::MOVsi)
4831      TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
4832    TmpInst.addOperand(Inst.getOperand(3)); // CondCode
4833    TmpInst.addOperand(Inst.getOperand(4));
4834    TmpInst.addOperand(Inst.getOperand(5)); // cc_out
4835    Inst = TmpInst;
4836    return true;
4837  }
4838  case ARM::RRXi: {
4839    unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0);
4840    MCInst TmpInst;
4841    TmpInst.setOpcode(ARM::MOVsi);
4842    TmpInst.addOperand(Inst.getOperand(0)); // Rd
4843    TmpInst.addOperand(Inst.getOperand(1)); // Rn
4844    TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty
4845    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
4846    TmpInst.addOperand(Inst.getOperand(3));
4847    TmpInst.addOperand(Inst.getOperand(4)); // cc_out
4848    Inst = TmpInst;
4849    return true;
4850  }
4851  case ARM::t2LDMIA_UPD: {
4852    // If this is a load of a single register, then we should use
4853    // a post-indexed LDR instruction instead, per the ARM ARM.
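    // e.g. 'ldmia r0!, {r1}' is equivalent to 'ldr r1, [r0], #4'.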
4854    if (Inst.getNumOperands() != 5)
4855      return false;
4856    MCInst TmpInst;
4857    TmpInst.setOpcode(ARM::t2LDR_POST);
4858    TmpInst.addOperand(Inst.getOperand(4)); // Rt
4859    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
4860    TmpInst.addOperand(Inst.getOperand(1)); // Rn
4861    TmpInst.addOperand(MCOperand::CreateImm(4));
4862    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
4863    TmpInst.addOperand(Inst.getOperand(3));
4864    Inst = TmpInst;
4865    return true;
4866  }
4867  case ARM::t2STMDB_UPD: {
4868    // If this is a store of a single register, then we should use
4869    // a pre-indexed STR instruction instead, per the ARM ARM.
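    // e.g. 'stmdb r0!, {r1}' is equivalent to 'str r1, [r0, #-4]!'.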
4870    if (Inst.getNumOperands() != 5)
4871      return false;
4872    MCInst TmpInst;
4873    TmpInst.setOpcode(ARM::t2STR_PRE);
4874    TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
4875    TmpInst.addOperand(Inst.getOperand(4)); // Rt
4876    TmpInst.addOperand(Inst.getOperand(1)); // Rn
4877    TmpInst.addOperand(MCOperand::CreateImm(-4));
4878    TmpInst.addOperand(Inst.getOperand(2)); // CondCode
4879    TmpInst.addOperand(Inst.getOperand(3));
4880    Inst = TmpInst;
4881    return true;
4882  }
4883  case ARM::LDMIA_UPD:
4884    // If this is a load of a single register via a 'pop', then we should use
4885    // a post-indexed LDR instruction instead, per the ARM ARM.
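      // e.g. 'pop {r3}' is equivalent to 'ldr r3, [sp], #4'.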
4886    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" &&
4887        Inst.getNumOperands() == 5) {
4888      MCInst TmpInst;
4889      TmpInst.setOpcode(ARM::LDR_POST_IMM);
4890      TmpInst.addOperand(Inst.getOperand(4)); // Rt
4891      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
4892      TmpInst.addOperand(Inst.getOperand(1)); // Rn
4893      TmpInst.addOperand(MCOperand::CreateReg(0));  // am2offset
4894      TmpInst.addOperand(MCOperand::CreateImm(4));
4895      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
4896      TmpInst.addOperand(Inst.getOperand(3));
4897      Inst = TmpInst;
4898      return true;
4899    }
4900    break;
4901  case ARM::STMDB_UPD:
4902    // If this is a store of a single register via a 'push', then we should use
4903    // a pre-indexed STR instruction instead, per the ARM ARM.
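      // e.g. 'push {r3}' is equivalent to 'str r3, [sp, #-4]!'.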
4904    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
4905        Inst.getNumOperands() == 5) {
4906      MCInst TmpInst;
4907      TmpInst.setOpcode(ARM::STR_PRE_IMM);
4908      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
4909      TmpInst.addOperand(Inst.getOperand(4)); // Rt
4910      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
4911      TmpInst.addOperand(MCOperand::CreateImm(-4));
4912      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
4913      TmpInst.addOperand(Inst.getOperand(3));
4914      Inst = TmpInst;
4915    }
4916    break;
4917  case ARM::tADDi8:
4918    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
4919    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
4920    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
4921    // to encoding T1 if <Rd> is omitted."
4922    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
4923      Inst.setOpcode(ARM::tADDi3);
4924      return true;
4925    }
4926    break;
4927  case ARM::tSUBi8:
4928    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
4929    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
4930    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
4931    // to encoding T1 if <Rd> is omitted."
4932    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) {
4933      Inst.setOpcode(ARM::tSUBi3);
4934      return true;
4935    }
4936    break;
4937  case ARM::tB:
4938    // A Thumb conditional branch outside of an IT block is a tBcc.
4939    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) {
4940      Inst.setOpcode(ARM::tBcc);
4941      return true;
4942    }
4943    break;
4944  case ARM::t2B:
4945    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
4946    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){
4947      Inst.setOpcode(ARM::t2Bcc);
4948      return true;
4949    }
4950    break;
4951  case ARM::t2Bcc:
4952    // If the conditional is AL or we're in an IT block, we really want t2B.
4953    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) {
4954      Inst.setOpcode(ARM::t2B);
4955      return true;
4956    }
4957    break;
4958  case ARM::tBcc:
4959    // If the conditional is AL, we really want tB.
4960    if (Inst.getOperand(1).getImm() == ARMCC::AL) {
4961      Inst.setOpcode(ARM::tB);
4962      return true;
4963    }
4964    break;
4965  case ARM::tLDMIA: {
4966    // If the register list contains any high registers, or if the writeback
4967    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
4968    // instead if we're in Thumb2. Otherwise, this should have generated
4969    // an error in validateInstruction().
4970    unsigned Rn = Inst.getOperand(0).getReg();
4971    bool hasWritebackToken =
4972      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
4973       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
4974    bool listContainsBase;
4975    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
4976        (!listContainsBase && !hasWritebackToken) ||
4977        (listContainsBase && hasWritebackToken)) {
4978      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
4979      assert (isThumbTwo());
4980      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
4981      // If we're switching to the updating version, we need to insert
4982      // the writeback tied operand.
4983      if (hasWritebackToken)
4984        Inst.insert(Inst.begin(),
4985                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
4986      return true;
4987    }
4988    break;
4989  }
4990  case ARM::tSTMIA_UPD: {
4991    // If the register list contains any high registers, we need to use
4992    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
4993    // should have generated an error in validateInstruction().
4994    unsigned Rn = Inst.getOperand(0).getReg();
4995    bool listContainsBase;
4996    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
4997      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
4998      assert (isThumbTwo());
4999      Inst.setOpcode(ARM::t2STMIA_UPD);
5000      return true;
5001    }
5002    break;
5003  }
5004  case ARM::tPOP: {
5005    bool listContainsBase;
5006    // If the register list contains any high registers, we need to use
5007    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
5008    // should have generated an error in validateInstruction().
5009    if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase))
5010      return false;
5011    assert (isThumbTwo());
5012    Inst.setOpcode(ARM::t2LDMIA_UPD);
5013    // Add the base register and writeback operands.
5014    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5015    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5016    return true;
5017  }
5018  case ARM::tPUSH: {
5019    bool listContainsBase;
5020    if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase))
5021      return false;
5022    assert (isThumbTwo());
5023    Inst.setOpcode(ARM::t2STMDB_UPD);
5024    // Add the base register and writeback operands.
5025    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5026    Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP));
5027    return true;
5028  }
5029  case ARM::t2MOVi: {
5030    // If we can use the 16-bit encoding and the user didn't explicitly
5031    // request the 32-bit variant, transform it here.
5032    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5033        Inst.getOperand(1).getImm() <= 255 &&
5034        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
5035         Inst.getOperand(4).getReg() == ARM::CPSR) ||
5036        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
5037        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5038         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5039      // The operands aren't in the same order for tMOVi8...
5040      MCInst TmpInst;
5041      TmpInst.setOpcode(ARM::tMOVi8);
5042      TmpInst.addOperand(Inst.getOperand(0));
5043      TmpInst.addOperand(Inst.getOperand(4));
5044      TmpInst.addOperand(Inst.getOperand(1));
5045      TmpInst.addOperand(Inst.getOperand(2));
5046      TmpInst.addOperand(Inst.getOperand(3));
5047      Inst = TmpInst;
5048      return true;
5049    }
5050    break;
5051  }
5052  case ARM::t2MOVr: {
5053    // If we can use the 16-bit encoding and the user didn't explicitly
5054    // request the 32-bit variant, transform it here.
5055    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5056        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5057        Inst.getOperand(2).getImm() == ARMCC::AL &&
5058        Inst.getOperand(4).getReg() == ARM::CPSR &&
5059        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5060         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5061      // The operands aren't the same for tMOV[S]r... (no cc_out)
5062      MCInst TmpInst;
5063      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
5064      TmpInst.addOperand(Inst.getOperand(0));
5065      TmpInst.addOperand(Inst.getOperand(1));
5066      TmpInst.addOperand(Inst.getOperand(2));
5067      TmpInst.addOperand(Inst.getOperand(3));
5068      Inst = TmpInst;
5069      return true;
5070    }
5071    break;
5072  }
5073  case ARM::t2SXTH:
5074  case ARM::t2SXTB:
5075  case ARM::t2UXTH:
5076  case ARM::t2UXTB: {
5077    // If we can use the 16-bit encoding and the user didn't explicitly
5078    // request the 32-bit variant, transform it here.
5079    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
5080        isARMLowRegister(Inst.getOperand(1).getReg()) &&
5081        Inst.getOperand(2).getImm() == 0 &&
5082        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
5083         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
5084      unsigned NewOpc;
5085      switch (Inst.getOpcode()) {
5086      default: llvm_unreachable("Illegal opcode!");
5087      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
5088      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
5089      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
5090      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
5091      }
5092      // The operands aren't the same for thumb1 (no rotate operand).
5093      MCInst TmpInst;
5094      TmpInst.setOpcode(NewOpc);
5095      TmpInst.addOperand(Inst.getOperand(0));
5096      TmpInst.addOperand(Inst.getOperand(1));
5097      TmpInst.addOperand(Inst.getOperand(3));
5098      TmpInst.addOperand(Inst.getOperand(4));
5099      Inst = TmpInst;
5100      return true;
5101    }
5102    break;
5103  }
5104  case ARM::t2IT: {
5105    // The mask bits for all but the first condition are encoded assuming
5106    // that the low bit of the condition code implies 't'. We currently
5107    // always parse with '1' implying 't', so XOR-toggle the bits if the
5108    // low bit of the condition code is zero. The encoding also expects
5109    // the low bit of the condition to be encoded as bit 4 of the mask
5110    // operand, so mask that in if needed.
5111    MCOperand &MO = Inst.getOperand(1);
5112    unsigned Mask = MO.getImm();
5113    unsigned OrigMask = Mask;
5114    unsigned TZ = CountTrailingZeros_32(Mask);
5115    if ((Inst.getOperand(0).getImm() & 1) == 0) {
5116      assert(Mask && TZ <= 3 && "illegal IT mask value!");
5117      for (unsigned i = 3; i != TZ; --i)
5118        Mask ^= 1 << i;
5119    } else
5120      Mask |= 0x10;
5121    MO.setImm(Mask);
5122
5123    // Set up the IT block state according to the IT instruction we just
5124    // matched.
5125    assert(!inITBlock() && "nested IT blocks?!");
5126    ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm());
5127    ITState.Mask = OrigMask; // Use the original mask, not the updated one.
5128    ITState.CurPosition = 0;
5129    ITState.FirstCond = true;
5130    break;
5131  }
5132  }
5133  return false;
5134}
5135
5136unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) {
5137  // 16-bit thumb arithmetic instructions either require or preclude the 'S'
5138  // suffix depending on whether they're in an IT block or not.
5139  unsigned Opc = Inst.getOpcode();
5140  const MCInstrDesc &MCID = getInstDesc(Opc);
5141  if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) {
5142    assert(MCID.hasOptionalDef() &&
5143           "optionally flag setting instruction missing optional def operand");
5144    assert(MCID.NumOperands == Inst.getNumOperands() &&
5145           "operand count mismatch!");
5146    // Find the optional-def operand (cc_out).
5147    unsigned OpNo;
5148    for (OpNo = 0;
5149         OpNo < MCID.NumOperands && !MCID.OpInfo[OpNo].isOptionalDef();
5150         ++OpNo)
5151      ;
5152    // If we're parsing Thumb1, reject it completely.
5153    if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR)
5154      return Match_MnemonicFail;
5155    // If we're parsing Thumb2, which form is legal depends on whether we're
5156    // in an IT block.
5157    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR &&
5158        !inITBlock())
5159      return Match_RequiresITBlock;
5160    if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR &&
5161        inITBlock())
5162      return Match_RequiresNotITBlock;
5163  }
5164  // Some high-register supporting Thumb1 encodings only allow both registers
5165  // to be from r0-r7 when in Thumb2.
5166  else if (Opc == ARM::tADDhirr && isThumbOne() &&
5167           isARMLowRegister(Inst.getOperand(1).getReg()) &&
5168           isARMLowRegister(Inst.getOperand(2).getReg()))
5169    return Match_RequiresThumb2;
5170  // Others only require ARMv6 or later.
5171  else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() &&
5172           isARMLowRegister(Inst.getOperand(0).getReg()) &&
5173           isARMLowRegister(Inst.getOperand(1).getReg()))
5174    return Match_RequiresV6;
5175  return Match_Success;
5176}
5177
5178bool ARMAsmParser::
5179MatchAndEmitInstruction(SMLoc IDLoc,
5180                        SmallVectorImpl<MCParsedAsmOperand*> &Operands,
5181                        MCStreamer &Out) {
5182  MCInst Inst;
5183  unsigned ErrorInfo;
5184  unsigned MatchResult;
5185  MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo);
5186  switch (MatchResult) {
5187  default: break;
5188  case Match_Success:
5189    // Context sensitive operand constraints aren't handled by the matcher,
5190    // so check them here.
5191    if (validateInstruction(Inst, Operands)) {
5192      // Still progress the IT block, otherwise one wrong condition causes
5193      // nasty cascading errors.
5194      forwardITPosition();
5195      return true;
5196    }
5197
5198    // Some instructions need post-processing to, for example, tweak which
5199    // encoding is selected. Loop on it while changes happen so the
5200    // individual transformations can chain off each other. E.g.,
5201    // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2LDR_POST(sp,r8)
5202    while (processInstruction(Inst, Operands))
5203      ;
5204
5205    // Only move forward at the very end so that everything in validate
5206    // and process gets a consistent answer about whether we're in an IT
5207    // block.
5208    forwardITPosition();
5209
5210    Out.EmitInstruction(Inst);
5211    return false;
5212  case Match_MissingFeature:
5213    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
5214    return true;
5215  case Match_InvalidOperand: {
5216    SMLoc ErrorLoc = IDLoc;
5217    if (ErrorInfo != ~0U) {
5218      if (ErrorInfo >= Operands.size())
5219        return Error(IDLoc, "too few operands for instruction");
5220
5221      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
5222      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
5223    }
5224
5225    return Error(ErrorLoc, "invalid operand for instruction");
5226  }
5227  case Match_MnemonicFail:
5228    return Error(IDLoc, "invalid instruction");
5229  case Match_ConversionFail:
5230    // The converter function will have already emitted a diagnostic.
5231    return true;
5232  case Match_RequiresNotITBlock:
5233    return Error(IDLoc, "flag setting instruction only valid outside IT block");
5234  case Match_RequiresITBlock:
5235    return Error(IDLoc, "instruction only valid inside IT block");
5236  case Match_RequiresV6:
5237    return Error(IDLoc, "instruction variant requires ARMv6 or later");
5238  case Match_RequiresThumb2:
5239    return Error(IDLoc, "instruction variant requires Thumb2");
5240  }
5241
5242  llvm_unreachable("Implement any new match types added!");
5243  return true;
5244}
5245
5246/// parseDirective parses the arm specific directives
5247bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
5248  StringRef IDVal = DirectiveID.getIdentifier();
5249  if (IDVal == ".word")
5250    return parseDirectiveWord(4, DirectiveID.getLoc());
5251  else if (IDVal == ".thumb")
5252    return parseDirectiveThumb(DirectiveID.getLoc());
5253  else if (IDVal == ".thumb_func")
5254    return parseDirectiveThumbFunc(DirectiveID.getLoc());
5255  else if (IDVal == ".code")
5256    return parseDirectiveCode(DirectiveID.getLoc());
5257  else if (IDVal == ".syntax")
5258    return parseDirectiveSyntax(DirectiveID.getLoc());
5259  return true;
5260}
5261
5262/// parseDirectiveWord
5263///  ::= .word [ expression (, expression)* ]
5264bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) {
5265  if (getLexer().isNot(AsmToken::EndOfStatement)) {
5266    for (;;) {
5267      const MCExpr *Value;
5268      if (getParser().ParseExpression(Value))
5269        return true;
5270
5271      getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/);
5272
5273      if (getLexer().is(AsmToken::EndOfStatement))
5274        break;
5275
5276      // FIXME: Improve diagnostic.
5277      if (getLexer().isNot(AsmToken::Comma))
5278        return Error(L, "unexpected token in directive");
5279      Parser.Lex();
5280    }
5281  }
5282
5283  Parser.Lex();
5284  return false;
5285}
5286
5287/// parseDirectiveThumb
5288///  ::= .thumb
5289bool ARMAsmParser::parseDirectiveThumb(SMLoc L) {
5290  if (getLexer().isNot(AsmToken::EndOfStatement))
5291    return Error(L, "unexpected token in directive");
5292  Parser.Lex();
5293
5294  // TODO: set thumb mode
5295  // TODO: tell the MC streamer the mode
5296  // getParser().getStreamer().Emit???();
5297  return false;
5298}
5299
5300/// parseDirectiveThumbFunc
5301///  ::= .thumb_func symbol_name
5302bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) {
5303  const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo();
5304  bool isMachO = MAI.hasSubsectionsViaSymbols();
5305  StringRef Name;
5306
5307  // Darwin asm has the function name after the .thumb_func directive;
5308  // ELF doesn't.
5309  if (isMachO) {
5310    const AsmToken &Tok = Parser.getTok();
5311    if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String))
5312      return Error(L, "unexpected token in .thumb_func directive");
5313    Name = Tok.getIdentifier();
5314    Parser.Lex(); // Consume the identifier token.
5315  }
5316
5317  if (getLexer().isNot(AsmToken::EndOfStatement))
5318    return Error(L, "unexpected token in directive");
5319  Parser.Lex();
5320
5321  // FIXME: assuming function name will be the line following .thumb_func
5322  if (!isMachO) {
5323    Name = Parser.getTok().getIdentifier();
5324  }
5325
5326  // Mark symbol as a thumb symbol.
5327  MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name);
5328  getParser().getStreamer().EmitThumbFunc(Func);
5329  return false;
5330}
5331
5332/// parseDirectiveSyntax
5333///  ::= .syntax unified | divided
5334bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) {
5335  const AsmToken &Tok = Parser.getTok();
5336  if (Tok.isNot(AsmToken::Identifier))
5337    return Error(L, "unexpected token in .syntax directive");
5338  StringRef Mode = Tok.getString();
5339  if (Mode == "unified" || Mode == "UNIFIED")
5340    Parser.Lex();
5341  else if (Mode == "divided" || Mode == "DIVIDED")
5342    return Error(L, "'.syntax divided' arm asssembly not supported");
5343  else
5344    return Error(L, "unrecognized syntax mode in .syntax directive");
5345
5346  if (getLexer().isNot(AsmToken::EndOfStatement))
5347    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
5348  Parser.Lex();
5349
5350  // TODO tell the MC streamer the mode
5351  // getParser().getStreamer().Emit???();
5352  return false;
5353}
5354
5355/// parseDirectiveCode
5356///  ::= .code 16 | 32
5357bool ARMAsmParser::parseDirectiveCode(SMLoc L) {
5358  const AsmToken &Tok = Parser.getTok();
5359  if (Tok.isNot(AsmToken::Integer))
5360    return Error(L, "unexpected token in .code directive");
5361  int64_t Val = Parser.getTok().getIntVal();
5362  if (Val == 16)
5363    Parser.Lex();
5364  else if (Val == 32)
5365    Parser.Lex();
5366  else
5367    return Error(L, "invalid operand to .code directive");
5368
5369  if (getLexer().isNot(AsmToken::EndOfStatement))
5370    return Error(Parser.getTok().getLoc(), "unexpected token in directive");
5371  Parser.Lex();
5372
5373  if (Val == 16) {
5374    if (!isThumb())
5375      SwitchMode();
5376    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16);
5377  } else {
5378    if (isThumb())
5379      SwitchMode();
5380    getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32);
5381  }
5382
5383  return false;
5384}
5385
5386extern "C" void LLVMInitializeARMAsmLexer();
5387
5388/// Force static initialization.
5389extern "C" void LLVMInitializeARMAsmParser() {
5390  RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget);
5391  RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget);
5392  LLVMInitializeARMAsmLexer();
5393}
5394
5395#define GET_REGISTER_MATCHER
5396#define GET_MATCHER_IMPLEMENTATION
5397#include "ARMGenAsmMatcher.inc"
5398