// ARMAsmParser.cpp -- revision efed3d1f58f69ec0a9bbe74e2ce5cc9b939a3805
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/ARMBaseInfo.h" 11#include "MCTargetDesc/ARMAddressingModes.h" 12#include "MCTargetDesc/ARMMCExpr.h" 13#include "llvm/MC/MCParser/MCAsmLexer.h" 14#include "llvm/MC/MCParser/MCAsmParser.h" 15#include "llvm/MC/MCParser/MCParsedAsmOperand.h" 16#include "llvm/MC/MCAsmInfo.h" 17#include "llvm/MC/MCContext.h" 18#include "llvm/MC/MCStreamer.h" 19#include "llvm/MC/MCExpr.h" 20#include "llvm/MC/MCInst.h" 21#include "llvm/MC/MCInstrDesc.h" 22#include "llvm/MC/MCRegisterInfo.h" 23#include "llvm/MC/MCSubtargetInfo.h" 24#include "llvm/MC/MCTargetAsmParser.h" 25#include "llvm/Support/MathExtras.h" 26#include "llvm/Support/SourceMgr.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/OwningPtr.h" 31#include "llvm/ADT/STLExtras.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/StringSwitch.h" 34#include "llvm/ADT/Twine.h" 35 36using namespace llvm; 37 38namespace { 39 40class ARMOperand; 41 42class ARMAsmParser : public MCTargetAsmParser { 43 MCSubtargetInfo &STI; 44 MCAsmParser &Parser; 45 46 struct { 47 ARMCC::CondCodes Cond; // Condition for IT block. 48 unsigned Mask:4; // Condition mask for instructions. 49 // Starting at first 1 (from lsb). 50 // '1' condition as indicated in IT. 51 // '0' inverse of condition (else). 52 // Count of instructions in IT block is 53 // 4 - trailingzeroes(mask) 54 55 bool FirstCond; // Explicit flag for when we're parsing the 56 // First instruction in the IT block. It's 57 // implied in the mask, so needs special 58 // handling. 
59 60 unsigned CurPosition; // Current position in parsing of IT 61 // block. In range [0,3]. Initialized 62 // according to count of instructions in block. 63 // ~0U if no active IT block. 64 } ITState; 65 bool inITBlock() { return ITState.CurPosition != ~0U;} 66 void forwardITPosition() { 67 if (!inITBlock()) return; 68 // Move to the next instruction in the IT block, if there is one. If not, 69 // mark the block as done. 70 unsigned TZ = CountTrailingZeros_32(ITState.Mask); 71 if (++ITState.CurPosition == 5 - TZ) 72 ITState.CurPosition = ~0U; // Done with the IT block after this. 73 } 74 75 76 MCAsmParser &getParser() const { return Parser; } 77 MCAsmLexer &getLexer() const { return Parser.getLexer(); } 78 79 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } 80 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } 81 82 int tryParseRegister(); 83 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); 84 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &); 85 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &); 86 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &); 87 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); 88 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 89 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 90 unsigned &ShiftAmount); 91 bool parseDirectiveWord(unsigned Size, SMLoc L); 92 bool parseDirectiveThumb(SMLoc L); 93 bool parseDirectiveThumbFunc(SMLoc L); 94 bool parseDirectiveCode(SMLoc L); 95 bool parseDirectiveSyntax(SMLoc L); 96 97 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, 98 bool &CarrySetting, unsigned &ProcessorIMod, 99 StringRef &ITMask); 100 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 101 bool &CanAcceptPredicationCode); 102 103 bool isThumb() const { 104 // FIXME: Can tablegen auto-generate this? 
105 return (STI.getFeatureBits() & ARM::ModeThumb) != 0; 106 } 107 bool isThumbOne() const { 108 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0; 109 } 110 bool isThumbTwo() const { 111 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2); 112 } 113 bool hasV6Ops() const { 114 return STI.getFeatureBits() & ARM::HasV6Ops; 115 } 116 bool hasV7Ops() const { 117 return STI.getFeatureBits() & ARM::HasV7Ops; 118 } 119 void SwitchMode() { 120 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 121 setAvailableFeatures(FB); 122 } 123 bool isMClass() const { 124 return STI.getFeatureBits() & ARM::FeatureMClass; 125 } 126 127 /// @name Auto-generated Match Functions 128 /// { 129 130#define GET_ASSEMBLER_HEADER 131#include "ARMGenAsmMatcher.inc" 132 133 /// } 134 135 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&); 136 OperandMatchResultTy parseCoprocNumOperand( 137 SmallVectorImpl<MCParsedAsmOperand*>&); 138 OperandMatchResultTy parseCoprocRegOperand( 139 SmallVectorImpl<MCParsedAsmOperand*>&); 140 OperandMatchResultTy parseCoprocOptionOperand( 141 SmallVectorImpl<MCParsedAsmOperand*>&); 142 OperandMatchResultTy parseMemBarrierOptOperand( 143 SmallVectorImpl<MCParsedAsmOperand*>&); 144 OperandMatchResultTy parseProcIFlagsOperand( 145 SmallVectorImpl<MCParsedAsmOperand*>&); 146 OperandMatchResultTy parseMSRMaskOperand( 147 SmallVectorImpl<MCParsedAsmOperand*>&); 148 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O, 149 StringRef Op, int Low, int High); 150 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 151 return parsePKHImm(O, "lsl", 0, 31); 152 } 153 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 154 return parsePKHImm(O, "asr", 1, 32); 155 } 156 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&); 157 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&); 158 
OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&); 159 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&); 160 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&); 161 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&); 162 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&); 163 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&); 164 165 // Asm Match Converter Methods 166 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 167 const SmallVectorImpl<MCParsedAsmOperand*> &); 168 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 169 const SmallVectorImpl<MCParsedAsmOperand*> &); 170 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 171 const SmallVectorImpl<MCParsedAsmOperand*> &); 172 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 173 const SmallVectorImpl<MCParsedAsmOperand*> &); 174 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 175 const SmallVectorImpl<MCParsedAsmOperand*> &); 176 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 177 const SmallVectorImpl<MCParsedAsmOperand*> &); 178 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 179 const SmallVectorImpl<MCParsedAsmOperand*> &); 180 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 181 const SmallVectorImpl<MCParsedAsmOperand*> &); 182 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 183 const SmallVectorImpl<MCParsedAsmOperand*> &); 184 bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 185 const SmallVectorImpl<MCParsedAsmOperand*> &); 186 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 187 const SmallVectorImpl<MCParsedAsmOperand*> &); 188 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 189 const SmallVectorImpl<MCParsedAsmOperand*> &); 190 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 191 const 
SmallVectorImpl<MCParsedAsmOperand*> &); 192 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, 193 const SmallVectorImpl<MCParsedAsmOperand*> &); 194 bool cvtStrdPre(MCInst &Inst, unsigned Opcode, 195 const SmallVectorImpl<MCParsedAsmOperand*> &); 196 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 197 const SmallVectorImpl<MCParsedAsmOperand*> &); 198 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 199 const SmallVectorImpl<MCParsedAsmOperand*> &); 200 bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 201 const SmallVectorImpl<MCParsedAsmOperand*> &); 202 bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 203 const SmallVectorImpl<MCParsedAsmOperand*> &); 204 bool cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 205 const SmallVectorImpl<MCParsedAsmOperand*> &); 206 bool cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 207 const SmallVectorImpl<MCParsedAsmOperand*> &); 208 209 bool validateInstruction(MCInst &Inst, 210 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 211 bool processInstruction(MCInst &Inst, 212 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 213 bool shouldOmitCCOutOperand(StringRef Mnemonic, 214 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 215 216public: 217 enum ARMMatchResultTy { 218 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 219 Match_RequiresNotITBlock, 220 Match_RequiresV6, 221 Match_RequiresThumb2 222 }; 223 224 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) 225 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { 226 MCAsmParserExtension::Initialize(_Parser); 227 228 // Initialize the set of available features. 229 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 230 231 // Not in an ITBlock to start with. 
232 ITState.CurPosition = ~0U; 233 } 234 235 // Implementation of the MCTargetAsmParser interface: 236 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); 237 bool ParseInstruction(StringRef Name, SMLoc NameLoc, 238 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 239 bool ParseDirective(AsmToken DirectiveID); 240 241 unsigned checkTargetMatchPredicate(MCInst &Inst); 242 243 bool MatchAndEmitInstruction(SMLoc IDLoc, 244 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 245 MCStreamer &Out); 246}; 247} // end anonymous namespace 248 249namespace { 250 251/// ARMOperand - Instances of this class represent a parsed ARM machine 252/// instruction. 253class ARMOperand : public MCParsedAsmOperand { 254 enum KindTy { 255 k_CondCode, 256 k_CCOut, 257 k_ITCondMask, 258 k_CoprocNum, 259 k_CoprocReg, 260 k_CoprocOption, 261 k_Immediate, 262 k_FPImmediate, 263 k_MemBarrierOpt, 264 k_Memory, 265 k_PostIndexRegister, 266 k_MSRMask, 267 k_ProcIFlags, 268 k_VectorIndex, 269 k_Register, 270 k_RegisterList, 271 k_DPRRegisterList, 272 k_SPRRegisterList, 273 k_VectorList, 274 k_ShiftedRegister, 275 k_ShiftedImmediate, 276 k_ShifterImmediate, 277 k_RotateImmediate, 278 k_BitfieldDescriptor, 279 k_Token 280 } Kind; 281 282 SMLoc StartLoc, EndLoc; 283 SmallVector<unsigned, 8> Registers; 284 285 union { 286 struct { 287 ARMCC::CondCodes Val; 288 } CC; 289 290 struct { 291 unsigned Val; 292 } Cop; 293 294 struct { 295 unsigned Val; 296 } CoprocOption; 297 298 struct { 299 unsigned Mask:4; 300 } ITMask; 301 302 struct { 303 ARM_MB::MemBOpt Val; 304 } MBOpt; 305 306 struct { 307 ARM_PROC::IFlags Val; 308 } IFlags; 309 310 struct { 311 unsigned Val; 312 } MMask; 313 314 struct { 315 const char *Data; 316 unsigned Length; 317 } Tok; 318 319 struct { 320 unsigned RegNum; 321 } Reg; 322 323 // A vector register list is a sequential list of 1 to 4 registers. 
324 struct { 325 unsigned RegNum; 326 unsigned Count; 327 } VectorList; 328 329 struct { 330 unsigned Val; 331 } VectorIndex; 332 333 struct { 334 const MCExpr *Val; 335 } Imm; 336 337 struct { 338 unsigned Val; // encoded 8-bit representation 339 } FPImm; 340 341 /// Combined record for all forms of ARM address expressions. 342 struct { 343 unsigned BaseRegNum; 344 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 345 // was specified. 346 const MCConstantExpr *OffsetImm; // Offset immediate value 347 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 348 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 349 unsigned ShiftImm; // shift for OffsetReg. 350 unsigned Alignment; // 0 = no alignment specified 351 // n = alignment in bytes (8, 16, or 32) 352 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 353 } Memory; 354 355 struct { 356 unsigned RegNum; 357 bool isAdd; 358 ARM_AM::ShiftOpc ShiftTy; 359 unsigned ShiftImm; 360 } PostIdxReg; 361 362 struct { 363 bool isASR; 364 unsigned Imm; 365 } ShifterImm; 366 struct { 367 ARM_AM::ShiftOpc ShiftTy; 368 unsigned SrcReg; 369 unsigned ShiftReg; 370 unsigned ShiftImm; 371 } RegShiftedReg; 372 struct { 373 ARM_AM::ShiftOpc ShiftTy; 374 unsigned SrcReg; 375 unsigned ShiftImm; 376 } RegShiftedImm; 377 struct { 378 unsigned Imm; 379 } RotImm; 380 struct { 381 unsigned LSB; 382 unsigned Width; 383 } Bitfield; 384 }; 385 386 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 387public: 388 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 389 Kind = o.Kind; 390 StartLoc = o.StartLoc; 391 EndLoc = o.EndLoc; 392 switch (Kind) { 393 case k_CondCode: 394 CC = o.CC; 395 break; 396 case k_ITCondMask: 397 ITMask = o.ITMask; 398 break; 399 case k_Token: 400 Tok = o.Tok; 401 break; 402 case k_CCOut: 403 case k_Register: 404 Reg = o.Reg; 405 break; 406 case k_RegisterList: 407 case k_DPRRegisterList: 408 case k_SPRRegisterList: 409 Registers = o.Registers; 410 break; 411 
case k_VectorList: 412 VectorList = o.VectorList; 413 break; 414 case k_CoprocNum: 415 case k_CoprocReg: 416 Cop = o.Cop; 417 break; 418 case k_CoprocOption: 419 CoprocOption = o.CoprocOption; 420 break; 421 case k_Immediate: 422 Imm = o.Imm; 423 break; 424 case k_FPImmediate: 425 FPImm = o.FPImm; 426 break; 427 case k_MemBarrierOpt: 428 MBOpt = o.MBOpt; 429 break; 430 case k_Memory: 431 Memory = o.Memory; 432 break; 433 case k_PostIndexRegister: 434 PostIdxReg = o.PostIdxReg; 435 break; 436 case k_MSRMask: 437 MMask = o.MMask; 438 break; 439 case k_ProcIFlags: 440 IFlags = o.IFlags; 441 break; 442 case k_ShifterImmediate: 443 ShifterImm = o.ShifterImm; 444 break; 445 case k_ShiftedRegister: 446 RegShiftedReg = o.RegShiftedReg; 447 break; 448 case k_ShiftedImmediate: 449 RegShiftedImm = o.RegShiftedImm; 450 break; 451 case k_RotateImmediate: 452 RotImm = o.RotImm; 453 break; 454 case k_BitfieldDescriptor: 455 Bitfield = o.Bitfield; 456 break; 457 case k_VectorIndex: 458 VectorIndex = o.VectorIndex; 459 break; 460 } 461 } 462 463 /// getStartLoc - Get the location of the first token of this operand. 464 SMLoc getStartLoc() const { return StartLoc; } 465 /// getEndLoc - Get the location of the last token of this operand. 
466 SMLoc getEndLoc() const { return EndLoc; } 467 468 ARMCC::CondCodes getCondCode() const { 469 assert(Kind == k_CondCode && "Invalid access!"); 470 return CC.Val; 471 } 472 473 unsigned getCoproc() const { 474 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 475 return Cop.Val; 476 } 477 478 StringRef getToken() const { 479 assert(Kind == k_Token && "Invalid access!"); 480 return StringRef(Tok.Data, Tok.Length); 481 } 482 483 unsigned getReg() const { 484 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 485 return Reg.RegNum; 486 } 487 488 const SmallVectorImpl<unsigned> &getRegList() const { 489 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 490 Kind == k_SPRRegisterList) && "Invalid access!"); 491 return Registers; 492 } 493 494 const MCExpr *getImm() const { 495 assert(Kind == k_Immediate && "Invalid access!"); 496 return Imm.Val; 497 } 498 499 unsigned getFPImm() const { 500 assert(Kind == k_FPImmediate && "Invalid access!"); 501 return FPImm.Val; 502 } 503 504 unsigned getVectorIndex() const { 505 assert(Kind == k_VectorIndex && "Invalid access!"); 506 return VectorIndex.Val; 507 } 508 509 ARM_MB::MemBOpt getMemBarrierOpt() const { 510 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 511 return MBOpt.Val; 512 } 513 514 ARM_PROC::IFlags getProcIFlags() const { 515 assert(Kind == k_ProcIFlags && "Invalid access!"); 516 return IFlags.Val; 517 } 518 519 unsigned getMSRMask() const { 520 assert(Kind == k_MSRMask && "Invalid access!"); 521 return MMask.Val; 522 } 523 524 bool isCoprocNum() const { return Kind == k_CoprocNum; } 525 bool isCoprocReg() const { return Kind == k_CoprocReg; } 526 bool isCoprocOption() const { return Kind == k_CoprocOption; } 527 bool isCondCode() const { return Kind == k_CondCode; } 528 bool isCCOut() const { return Kind == k_CCOut; } 529 bool isITMask() const { return Kind == k_ITCondMask; } 530 bool isITCondCode() const { return Kind == k_CondCode; } 531 bool isImm() 
const { return Kind == k_Immediate; } 532 bool isFPImm() const { return Kind == k_FPImmediate; } 533 bool isImm8s4() const { 534 if (Kind != k_Immediate) 535 return false; 536 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 537 if (!CE) return false; 538 int64_t Value = CE->getValue(); 539 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 540 } 541 bool isImm0_1020s4() const { 542 if (Kind != k_Immediate) 543 return false; 544 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 545 if (!CE) return false; 546 int64_t Value = CE->getValue(); 547 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 548 } 549 bool isImm0_508s4() const { 550 if (Kind != k_Immediate) 551 return false; 552 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 553 if (!CE) return false; 554 int64_t Value = CE->getValue(); 555 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 556 } 557 bool isImm0_255() const { 558 if (Kind != k_Immediate) 559 return false; 560 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 561 if (!CE) return false; 562 int64_t Value = CE->getValue(); 563 return Value >= 0 && Value < 256; 564 } 565 bool isImm0_7() const { 566 if (Kind != k_Immediate) 567 return false; 568 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 569 if (!CE) return false; 570 int64_t Value = CE->getValue(); 571 return Value >= 0 && Value < 8; 572 } 573 bool isImm0_15() const { 574 if (Kind != k_Immediate) 575 return false; 576 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 577 if (!CE) return false; 578 int64_t Value = CE->getValue(); 579 return Value >= 0 && Value < 16; 580 } 581 bool isImm0_31() const { 582 if (Kind != k_Immediate) 583 return false; 584 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 585 if (!CE) return false; 586 int64_t Value = CE->getValue(); 587 return Value >= 0 && Value < 32; 588 } 589 bool isImm1_16() const { 590 if (Kind != k_Immediate) 591 return false; 
592 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 593 if (!CE) return false; 594 int64_t Value = CE->getValue(); 595 return Value > 0 && Value < 17; 596 } 597 bool isImm1_32() const { 598 if (Kind != k_Immediate) 599 return false; 600 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 601 if (!CE) return false; 602 int64_t Value = CE->getValue(); 603 return Value > 0 && Value < 33; 604 } 605 bool isImm0_32() const { 606 if (Kind != k_Immediate) 607 return false; 608 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 609 if (!CE) return false; 610 int64_t Value = CE->getValue(); 611 return Value >= 0 && Value < 33; 612 } 613 bool isImm0_65535() const { 614 if (Kind != k_Immediate) 615 return false; 616 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 617 if (!CE) return false; 618 int64_t Value = CE->getValue(); 619 return Value >= 0 && Value < 65536; 620 } 621 bool isImm0_65535Expr() const { 622 if (Kind != k_Immediate) 623 return false; 624 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 625 // If it's not a constant expression, it'll generate a fixup and be 626 // handled later. 
627 if (!CE) return true; 628 int64_t Value = CE->getValue(); 629 return Value >= 0 && Value < 65536; 630 } 631 bool isImm24bit() const { 632 if (Kind != k_Immediate) 633 return false; 634 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 635 if (!CE) return false; 636 int64_t Value = CE->getValue(); 637 return Value >= 0 && Value <= 0xffffff; 638 } 639 bool isImmThumbSR() const { 640 if (Kind != k_Immediate) 641 return false; 642 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 643 if (!CE) return false; 644 int64_t Value = CE->getValue(); 645 return Value > 0 && Value < 33; 646 } 647 bool isPKHLSLImm() const { 648 if (Kind != k_Immediate) 649 return false; 650 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 651 if (!CE) return false; 652 int64_t Value = CE->getValue(); 653 return Value >= 0 && Value < 32; 654 } 655 bool isPKHASRImm() const { 656 if (Kind != k_Immediate) 657 return false; 658 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 659 if (!CE) return false; 660 int64_t Value = CE->getValue(); 661 return Value > 0 && Value <= 32; 662 } 663 bool isARMSOImm() const { 664 if (Kind != k_Immediate) 665 return false; 666 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 667 if (!CE) return false; 668 int64_t Value = CE->getValue(); 669 return ARM_AM::getSOImmVal(Value) != -1; 670 } 671 bool isARMSOImmNot() const { 672 if (Kind != k_Immediate) 673 return false; 674 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 675 if (!CE) return false; 676 int64_t Value = CE->getValue(); 677 return ARM_AM::getSOImmVal(~Value) != -1; 678 } 679 bool isT2SOImm() const { 680 if (Kind != k_Immediate) 681 return false; 682 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 683 if (!CE) return false; 684 int64_t Value = CE->getValue(); 685 return ARM_AM::getT2SOImmVal(Value) != -1; 686 } 687 bool isT2SOImmNot() const { 688 if (Kind != k_Immediate) 689 return false; 690 const 
MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 691 if (!CE) return false; 692 int64_t Value = CE->getValue(); 693 return ARM_AM::getT2SOImmVal(~Value) != -1; 694 } 695 bool isSetEndImm() const { 696 if (Kind != k_Immediate) 697 return false; 698 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 699 if (!CE) return false; 700 int64_t Value = CE->getValue(); 701 return Value == 1 || Value == 0; 702 } 703 bool isReg() const { return Kind == k_Register; } 704 bool isRegList() const { return Kind == k_RegisterList; } 705 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 706 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 707 bool isToken() const { return Kind == k_Token; } 708 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 709 bool isMemory() const { return Kind == k_Memory; } 710 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 711 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 712 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 713 bool isRotImm() const { return Kind == k_RotateImmediate; } 714 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 715 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 716 bool isPostIdxReg() const { 717 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy ==ARM_AM::no_shift; 718 } 719 bool isMemNoOffset(bool alignOK = false) const { 720 if (!isMemory()) 721 return false; 722 // No offset of any kind. 723 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 724 (alignOK || Memory.Alignment == 0); 725 } 726 bool isAlignedMemory() const { 727 return isMemNoOffset(true); 728 } 729 bool isAddrMode2() const { 730 if (!isMemory() || Memory.Alignment != 0) return false; 731 // Check for register offset. 732 if (Memory.OffsetRegNum) return true; 733 // Immediate offset in range [-4095, 4095]. 
734 if (!Memory.OffsetImm) return true; 735 int64_t Val = Memory.OffsetImm->getValue(); 736 return Val > -4096 && Val < 4096; 737 } 738 bool isAM2OffsetImm() const { 739 if (Kind != k_Immediate) 740 return false; 741 // Immediate offset in range [-4095, 4095]. 742 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 743 if (!CE) return false; 744 int64_t Val = CE->getValue(); 745 return Val > -4096 && Val < 4096; 746 } 747 bool isAddrMode3() const { 748 if (!isMemory() || Memory.Alignment != 0) return false; 749 // No shifts are legal for AM3. 750 if (Memory.ShiftType != ARM_AM::no_shift) return false; 751 // Check for register offset. 752 if (Memory.OffsetRegNum) return true; 753 // Immediate offset in range [-255, 255]. 754 if (!Memory.OffsetImm) return true; 755 int64_t Val = Memory.OffsetImm->getValue(); 756 return Val > -256 && Val < 256; 757 } 758 bool isAM3Offset() const { 759 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 760 return false; 761 if (Kind == k_PostIndexRegister) 762 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 763 // Immediate offset in range [-255, 255]. 764 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 765 if (!CE) return false; 766 int64_t Val = CE->getValue(); 767 // Special case, #-0 is INT32_MIN. 768 return (Val > -256 && Val < 256) || Val == INT32_MIN; 769 } 770 bool isAddrMode5() const { 771 // If we have an immediate that's not a constant, treat it as a label 772 // reference needing a fixup. If it is a constant, it's something else 773 // and we reject it. 774 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 775 return true; 776 if (!isMemory() || Memory.Alignment != 0) return false; 777 // Check for register offset. 778 if (Memory.OffsetRegNum) return false; 779 // Immediate offset in range [-1020, 1020] and a multiple of 4. 
780 if (!Memory.OffsetImm) return true; 781 int64_t Val = Memory.OffsetImm->getValue(); 782 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 783 Val == INT32_MIN; 784 } 785 bool isMemTBB() const { 786 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 787 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 788 return false; 789 return true; 790 } 791 bool isMemTBH() const { 792 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 793 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 794 Memory.Alignment != 0 ) 795 return false; 796 return true; 797 } 798 bool isMemRegOffset() const { 799 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 800 return false; 801 return true; 802 } 803 bool isT2MemRegOffset() const { 804 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 805 Memory.Alignment != 0) 806 return false; 807 // Only lsl #{0, 1, 2, 3} allowed. 808 if (Memory.ShiftType == ARM_AM::no_shift) 809 return true; 810 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 811 return false; 812 return true; 813 } 814 bool isMemThumbRR() const { 815 // Thumb reg+reg addressing is simple. Just two registers, a base and 816 // an offset. No shifts, negations or any other complicating factors. 817 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 818 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 819 return false; 820 return isARMLowRegister(Memory.BaseRegNum) && 821 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 822 } 823 bool isMemThumbRIs4() const { 824 if (!isMemory() || Memory.OffsetRegNum != 0 || 825 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 826 return false; 827 // Immediate offset, multiple of 4 in range [0, 124]. 
828 if (!Memory.OffsetImm) return true; 829 int64_t Val = Memory.OffsetImm->getValue(); 830 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 831 } 832 bool isMemThumbRIs2() const { 833 if (!isMemory() || Memory.OffsetRegNum != 0 || 834 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 835 return false; 836 // Immediate offset, multiple of 4 in range [0, 62]. 837 if (!Memory.OffsetImm) return true; 838 int64_t Val = Memory.OffsetImm->getValue(); 839 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 840 } 841 bool isMemThumbRIs1() const { 842 if (!isMemory() || Memory.OffsetRegNum != 0 || 843 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 844 return false; 845 // Immediate offset in range [0, 31]. 846 if (!Memory.OffsetImm) return true; 847 int64_t Val = Memory.OffsetImm->getValue(); 848 return Val >= 0 && Val <= 31; 849 } 850 bool isMemThumbSPI() const { 851 if (!isMemory() || Memory.OffsetRegNum != 0 || 852 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 853 return false; 854 // Immediate offset, multiple of 4 in range [0, 1020]. 855 if (!Memory.OffsetImm) return true; 856 int64_t Val = Memory.OffsetImm->getValue(); 857 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 858 } 859 bool isMemImm8s4Offset() const { 860 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 861 return false; 862 // Immediate offset a multiple of 4 in range [-1020, 1020]. 863 if (!Memory.OffsetImm) return true; 864 int64_t Val = Memory.OffsetImm->getValue(); 865 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 866 } 867 bool isMemImm0_1020s4Offset() const { 868 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 869 return false; 870 // Immediate offset a multiple of 4 in range [0, 1020]. 
871 if (!Memory.OffsetImm) return true; 872 int64_t Val = Memory.OffsetImm->getValue(); 873 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 874 } 875 bool isMemImm8Offset() const { 876 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 877 return false; 878 // Immediate offset in range [-255, 255]. 879 if (!Memory.OffsetImm) return true; 880 int64_t Val = Memory.OffsetImm->getValue(); 881 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 882 } 883 bool isMemPosImm8Offset() const { 884 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 885 return false; 886 // Immediate offset in range [0, 255]. 887 if (!Memory.OffsetImm) return true; 888 int64_t Val = Memory.OffsetImm->getValue(); 889 return Val >= 0 && Val < 256; 890 } 891 bool isMemNegImm8Offset() const { 892 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 893 return false; 894 // Immediate offset in range [-255, -1]. 895 if (!Memory.OffsetImm) return true; 896 int64_t Val = Memory.OffsetImm->getValue(); 897 return Val > -256 && Val < 0; 898 } 899 bool isMemUImm12Offset() const { 900 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 901 return false; 902 // Immediate offset in range [0, 4095]. 903 if (!Memory.OffsetImm) return true; 904 int64_t Val = Memory.OffsetImm->getValue(); 905 return (Val >= 0 && Val < 4096); 906 } 907 bool isMemImm12Offset() const { 908 // If we have an immediate that's not a constant, treat it as a label 909 // reference needing a fixup. If it is a constant, it's something else 910 // and we reject it. 911 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 912 return true; 913 914 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 915 return false; 916 // Immediate offset in range [-4095, 4095]. 
    // (Tail of isMemImm12Offset.) No offset immediate means #0: legal.
    if (!Memory.OffsetImm) return true;
    int64_t Val = Memory.OffsetImm->getValue();
    // INT32_MIN is the sentinel for "#-0".
    return (Val > -4096 && Val < 4096) || (Val == INT32_MIN);
  }
  // Post-indexed constant offset in [-255, 255]; INT32_MIN encodes "#-0".
  bool isPostIdxImm8() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return (Val > -256 && Val < 256) || (Val == INT32_MIN);
  }
  // Post-indexed constant offset: a multiple of 4 in [-1020, 1020];
  // INT32_MIN encodes "#-0".
  bool isPostIdxImm8s4() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    if (!CE) return false;
    int64_t Val = CE->getValue();
    return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) ||
      (Val == INT32_MIN);
  }

  bool isMSRMask() const { return Kind == k_MSRMask; }
  bool isProcIFlags() const { return Kind == k_ProcIFlags; }

  // NEON operands.

  // Vector-list predicates: match a register list of exactly N D registers.
  bool isVecListOneD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 1;
  }

  bool isVecListTwoD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 2;
  }

  bool isVecListThreeD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 3;
  }

  bool isVecListFourD() const {
    if (Kind != k_VectorList) return false;
    return VectorList.Count == 4;
  }

  bool isVecListTwoQ() const {
    if (Kind != k_VectorList) return false;
    //FIXME: We haven't taught the parser to handle by-two register lists
    // yet, so don't pretend to know one.
    // The "&& false" deliberately rejects all operands until then.
    return VectorList.Count == 2 && false;
  }

  // Lane-index predicates: legal lane numbers for 8-, 16- and 32-bit
  // elements are [0,7], [0,3] and [0,1] respectively.
  bool isVectorIndex8() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 8;
  }
  bool isVectorIndex16() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 4;
  }
  bool isVectorIndex32() const {
    if (Kind != k_VectorIndex) return false;
    return VectorIndex.Val < 2;
  }

  bool isNEONi8splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i8 value splatted across 8 bytes. The immediate is just the 8 byte
    // value.
    return Value >= 0 && Value < 256;
  }

  bool isNEONi16splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i16 value in the range [0,255] or [0x0100, 0xff00]
    return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00);
  }

  bool isNEONi32splat() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
    if (!CE) return false;
    int64_t Value = CE->getValue();
    // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X.
    return (Value >= 0 && Value < 256) ||
      (Value >= 0x0100 && Value <= 0xff00) ||
      (Value >= 0x010000 && Value <= 0xff0000) ||
      (Value >= 0x01000000 && Value <= 0xff000000);
  }

  bool isNEONi32vmov() const {
    if (Kind != k_Immediate)
      return false;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    // Must be a constant.
1025 if (!CE) return false; 1026 int64_t Value = CE->getValue(); 1027 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1028 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1029 return (Value >= 0 && Value < 256) || 1030 (Value >= 0x0100 && Value <= 0xff00) || 1031 (Value >= 0x010000 && Value <= 0xff0000) || 1032 (Value >= 0x01000000 && Value <= 0xff000000) || 1033 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1034 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1035 } 1036 1037 bool isNEONi64splat() const { 1038 if (Kind != k_Immediate) 1039 return false; 1040 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1041 // Must be a constant. 1042 if (!CE) return false; 1043 uint64_t Value = CE->getValue(); 1044 // i64 value with each byte being either 0 or 0xff. 1045 for (unsigned i = 0; i < 8; ++i) 1046 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1047 return true; 1048 } 1049 1050 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1051 // Add as immediates when possible. Null MCExpr = 0. 1052 if (Expr == 0) 1053 Inst.addOperand(MCOperand::CreateImm(0)); 1054 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1055 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1056 else 1057 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1058 } 1059 1060 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1061 assert(N == 2 && "Invalid number of operands!"); 1062 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1063 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR; 1064 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1065 } 1066 1067 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1068 assert(N == 1 && "Invalid number of operands!"); 1069 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1070 } 1071 1072 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1073 assert(N == 1 && "Invalid number of operands!"); 1074 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1075 } 1076 1077 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1078 assert(N == 1 && "Invalid number of operands!"); 1079 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1080 } 1081 1082 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1083 assert(N == 1 && "Invalid number of operands!"); 1084 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1085 } 1086 1087 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1088 assert(N == 1 && "Invalid number of operands!"); 1089 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1090 } 1091 1092 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1093 assert(N == 1 && "Invalid number of operands!"); 1094 Inst.addOperand(MCOperand::CreateReg(getReg())); 1095 } 1096 1097 void addRegOperands(MCInst &Inst, unsigned N) const { 1098 assert(N == 1 && "Invalid number of operands!"); 1099 Inst.addOperand(MCOperand::CreateReg(getReg())); 1100 } 1101 1102 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1103 assert(N == 3 && "Invalid number of operands!"); 1104 assert(isRegShiftedReg() && 1105 "addRegShiftedRegOperands() on non RegShiftedReg!"); 1106 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1107 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1108 Inst.addOperand(MCOperand::CreateImm( 1109 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1110 } 1111 1112 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1113 assert(N == 2 && "Invalid number of operands!"); 
1114 assert(isRegShiftedImm() && 1115 "addRegShiftedImmOperands() on non RegShiftedImm!"); 1116 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1117 Inst.addOperand(MCOperand::CreateImm( 1118 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1119 } 1120 1121 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1122 assert(N == 1 && "Invalid number of operands!"); 1123 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1124 ShifterImm.Imm)); 1125 } 1126 1127 void addRegListOperands(MCInst &Inst, unsigned N) const { 1128 assert(N == 1 && "Invalid number of operands!"); 1129 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1130 for (SmallVectorImpl<unsigned>::const_iterator 1131 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1132 Inst.addOperand(MCOperand::CreateReg(*I)); 1133 } 1134 1135 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1136 addRegListOperands(Inst, N); 1137 } 1138 1139 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1140 addRegListOperands(Inst, N); 1141 } 1142 1143 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1144 assert(N == 1 && "Invalid number of operands!"); 1145 // Encoded as val>>3. The printer handles display as 8, 16, 24. 1146 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1147 } 1148 1149 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1150 assert(N == 1 && "Invalid number of operands!"); 1151 // Munge the lsb/width into a bitfield mask. 1152 unsigned lsb = Bitfield.LSB; 1153 unsigned width = Bitfield.Width; 1154 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 
    // (Tail of addBitfieldOperands.) Build the mask with the [lsb, lsb+width)
    // bits clear and all other bits set.
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 0 : Val));
  }

  void addT2SOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a t2_so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addARMSOImmNotOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The operand is actually a so_imm, but we have its bitwise
    // negation in the assembly source, so twiddle it here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(~CE->getValue()));
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

  // Addressing mode 2: base reg, offset reg (0 if none), and a packed
  // AM2 opcode word (add/sub flag, immediate, shift).
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // AM2 post-indexed immediate offset: dummy register plus packed opcode.
  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ?
      // (Completes the AddSub ternary started above.)
      ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Addressing mode 3: base reg, offset reg (0 if none), packed AM3 opcode.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // AM3 post-indexed offset: either a register (k_PostIndexRegister) or a
  // constant immediate; both produce a reg operand plus packed AM3 opcode.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Addressing mode 5 (VFP load/store): base reg plus packed AM5 opcode,
  // or a label expression needing a fixup.
  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If we have an immediate that's not a constant, treat it as a label
    // reference needing a fixup. If it is a constant, it's something else
    // and we reject it.
    if (isImm()) {
      Inst.addOperand(MCOperand::CreateExpr(getImm()));
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ?
      // (Completes the Val ternary started above.)
      Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // TBB/TBH table branch: base register plus index register.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Register offset with shift, packed via the AM2 encoding helper.
  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val =
      ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                        Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Thumb2 register offset: only a plain left-shift amount is encoded.
  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Thumb reg+imm forms: the immediate is stored pre-scaled (by 4, 2, 1).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed imm8: magnitude in the low bits, add/sub flag in bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ? -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVecListOneDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListThreeDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction.
    // (Continuation of addVecListThreeDOperands' comment:) The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListFourDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoQOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Values >= 256 live in the high byte: shift down and tag accordingly.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Normalize the single set byte into the low 8 bits and tag which
    // byte position it came from.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Like the i32 splat case, but the VMOV-only 00Xf/0Xff forms get their
    // own tags when the low byte(s) are all-ones.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Compress the 64-bit value to one bit per byte (each byte is 0 or 0xff).
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Factory helpers: allocate an ARMOperand of the given kind and fill in
  // its payload and source range.
  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new
      // (Completes the allocation started above.)
      ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Build a register-list operand; the concrete kind (GPR/DPR/SPR list) is
  // chosen from the class of the first register. The list is stored sorted.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.

/// Dump a human-readable description of this operand for debugging.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Table indexed by the 4-bit IT mask encoding; the trailing 1 bit in the
    // mask marks the end of the then/else pattern.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; offset/shift/alignment are omitted.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): no opening '<' here, unlike the other cases, yet a
    // closing '>' is emitted below.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print set flags from highest bit (a) down to lowest (f).
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedReg.ShiftTy)
       << " " << RegShiftedReg.ShiftReg << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg << " "
       << ARM_AM::getShiftOpcStr(RegShiftedImm.ShiftTy)
       << " #" << RegShiftedImm.ShiftImm << ">";
    break;
  case k_RotateImmediate:
    // The stored value is the rotation divided by 8.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      // Comma-separate all but the last element.
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

/// Target hook: parse a register and report it via \p RegNo.
/// Returns true (error) if the current token is not a register name.
/// NOTE(review): StartLoc/EndLoc are not set here — TODO confirm callers
/// do not rely on them.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  std::string lowerCase = Tok.getString().lower();
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Handle aliases the tablegen matcher doesn't know about.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string lowerCase = Tok.getString().lower();
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift operator at all: recoverable, nothing consumed yet.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  // NOTE(review): Error() returns true, so this returns 1 ("recoverable")
  // even though the shift operator token was consumed and the previous
  // operand has been popped (and is destroyed with PrevOp) — confirm callers
  // treat this path correctly.
  if (!PrevOp->isReg())
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      // On failure tryParseRegister() does not consume the token, so this
      // location still points at the offending identifier.
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // RRX is canonicalized as a shifted-immediate form even though ShiftReg
  // was set above (the encoder handles it specially).
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name.  The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2132bool ARMAsmParser:: 2133tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2134 SMLoc S = Parser.getTok().getLoc(); 2135 int RegNo = tryParseRegister(); 2136 if (RegNo == -1) 2137 return true; 2138 2139 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2140 2141 const AsmToken &ExclaimTok = Parser.getTok(); 2142 if (ExclaimTok.is(AsmToken::Exclaim)) { 2143 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2144 ExclaimTok.getLoc())); 2145 Parser.Lex(); // Eat exclaim token 2146 return false; 2147 } 2148 2149 // Also check for an index operand. This is only legal for vector registers, 2150 // but that'll get caught OK in operand matching, so we don't need to 2151 // explicitly filter everything else out here. 2152 if (Parser.getTok().is(AsmToken::LBrac)) { 2153 SMLoc SIdx = Parser.getTok().getLoc(); 2154 Parser.Lex(); // Eat left bracket token. 2155 2156 const MCExpr *ImmVal; 2157 if (getParser().ParseExpression(ImmVal)) 2158 return MatchOperand_ParseFail; 2159 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2160 if (!MCE) { 2161 TokError("immediate value expected for vector index"); 2162 return MatchOperand_ParseFail; 2163 } 2164 2165 SMLoc E = Parser.getTok().getLoc(); 2166 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2167 Error(E, "']' expected"); 2168 return MatchOperand_ParseFail; 2169 } 2170 2171 Parser.Lex(); // Eat right bracket token. 2172 2173 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2174 SIdx, E, 2175 getContext())); 2176 } 2177 2178 return false; 2179} 2180 2181/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2182/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2183/// "c5", ... 2184static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2185 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2186 // but efficient. 
2187 switch (Name.size()) { 2188 default: break; 2189 case 2: 2190 if (Name[0] != CoprocOp) 2191 return -1; 2192 switch (Name[1]) { 2193 default: return -1; 2194 case '0': return 0; 2195 case '1': return 1; 2196 case '2': return 2; 2197 case '3': return 3; 2198 case '4': return 4; 2199 case '5': return 5; 2200 case '6': return 6; 2201 case '7': return 7; 2202 case '8': return 8; 2203 case '9': return 9; 2204 } 2205 break; 2206 case 3: 2207 if (Name[0] != CoprocOp || Name[1] != '1') 2208 return -1; 2209 switch (Name[2]) { 2210 default: return -1; 2211 case '0': return 10; 2212 case '1': return 11; 2213 case '2': return 12; 2214 case '3': return 13; 2215 case '4': return 14; 2216 case '5': return 15; 2217 } 2218 break; 2219 } 2220 2221 return -1; 2222} 2223 2224/// parseITCondCode - Try to parse a condition code for an IT instruction. 2225ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2226parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2227 SMLoc S = Parser.getTok().getLoc(); 2228 const AsmToken &Tok = Parser.getTok(); 2229 if (!Tok.is(AsmToken::Identifier)) 2230 return MatchOperand_NoMatch; 2231 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2232 .Case("eq", ARMCC::EQ) 2233 .Case("ne", ARMCC::NE) 2234 .Case("hs", ARMCC::HS) 2235 .Case("cs", ARMCC::HS) 2236 .Case("lo", ARMCC::LO) 2237 .Case("cc", ARMCC::LO) 2238 .Case("mi", ARMCC::MI) 2239 .Case("pl", ARMCC::PL) 2240 .Case("vs", ARMCC::VS) 2241 .Case("vc", ARMCC::VC) 2242 .Case("hi", ARMCC::HI) 2243 .Case("ls", ARMCC::LS) 2244 .Case("ge", ARMCC::GE) 2245 .Case("lt", ARMCC::LT) 2246 .Case("gt", ARMCC::GT) 2247 .Case("le", ARMCC::LE) 2248 .Case("al", ARMCC::AL) 2249 .Default(~0U); 2250 if (CC == ~0U) 2251 return MatchOperand_NoMatch; 2252 Parser.Lex(); // Eat the token. 2253 2254 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2255 2256 return MatchOperand_Success; 2257} 2258 2259/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2260/// token must be an Identifier when called, and if it is a coprocessor 2261/// number, the token is eaten and the operand is added to the operand list. 2262ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2263parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2264 SMLoc S = Parser.getTok().getLoc(); 2265 const AsmToken &Tok = Parser.getTok(); 2266 if (Tok.isNot(AsmToken::Identifier)) 2267 return MatchOperand_NoMatch; 2268 2269 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2270 if (Num == -1) 2271 return MatchOperand_NoMatch; 2272 2273 Parser.Lex(); // Eat identifier token. 2274 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2275 return MatchOperand_Success; 2276} 2277 2278/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2279/// token must be an Identifier when called, and if it is a coprocessor 2280/// number, the token is eaten and the operand is added to the operand list. 2281ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2282parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2283 SMLoc S = Parser.getTok().getLoc(); 2284 const AsmToken &Tok = Parser.getTok(); 2285 if (Tok.isNot(AsmToken::Identifier)) 2286 return MatchOperand_NoMatch; 2287 2288 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2289 if (Reg == -1) 2290 return MatchOperand_NoMatch; 2291 2292 Parser.Lex(); // Eat identifier token. 2293 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2294 return MatchOperand_Success; 2295} 2296 2297/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2298/// coproc_option : '{' imm0_255 '}' 2299ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2300parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2301 SMLoc S = Parser.getTok().getLoc(); 2302 2303 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2304 if (Parser.getTok().isNot(AsmToken::LCurly)) 2305 return MatchOperand_NoMatch; 2306 Parser.Lex(); // Eat the '{' 2307 2308 const MCExpr *Expr; 2309 SMLoc Loc = Parser.getTok().getLoc(); 2310 if (getParser().ParseExpression(Expr)) { 2311 Error(Loc, "illegal expression"); 2312 return MatchOperand_ParseFail; 2313 } 2314 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2315 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2316 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2317 return MatchOperand_ParseFail; 2318 } 2319 int Val = CE->getValue(); 2320 2321 // Check for and consume the closing '}' 2322 if (Parser.getTok().isNot(AsmToken::RCurly)) 2323 return MatchOperand_ParseFail; 2324 SMLoc E = Parser.getTok().getLoc(); 2325 Parser.Lex(); // Eat the '}' 2326 2327 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2328 return MatchOperand_Success; 2329} 2330 2331// For register list parsing, we need to map from raw GPR register numbering 2332// to the enumeration values. The enumeration values aren't sorted by 2333// register number due to our using "sp", "lr" and "pc" as canonical names. 2334static unsigned getNextRegister(unsigned Reg) { 2335 // If this is a GPR, we need to do it manually, otherwise we can rely 2336 // on the sort ordering of the enumeration since the other reg-classes 2337 // are sane. 
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  switch(Reg) {
  default: assert(0 && "Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

// Return the low-subreg of a given Q register.
static unsigned getDRegFromQReg(unsigned QReg) {
  switch (QReg) {
  default: llvm_unreachable("expected a Q register!");
  case ARM::Q0:  return ARM::D0;
  case ARM::Q1:  return ARM::D2;
  case ARM::Q2:  return ARM::D4;
  case ARM::Q3:  return ARM::D6;
  case ARM::Q4:  return ARM::D8;
  case ARM::Q5:  return ARM::D10;
  case ARM::Q6:  return ARM::D12;
  case ARM::Q7:  return ARM::D14;
  case ARM::Q8:  return ARM::D16;
  case ARM::Q9:  return ARM::D18;
  case ARM::Q10: return ARM::D20;
  case ARM::Q11: return ARM::D22;
  case ARM::Q12: return ARM::D24;
  case ARM::Q13: return ARM::D26;
  case ARM::Q14: return ARM::D28;
  case ARM::Q15: return ARM::D30;
  }
}

/// Parse a register list.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;

  // Allow Q regs and just interpret them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    Reg = getDRegFromQReg(Reg);
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    ++Reg;
  }
  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // Store the register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // Allow Q regs and just interpret them as the two D sub-registers.
    bool isQReg = false;
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      isQReg = true;
    }
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
    // A Q register contributes its second D sub-register as well.
    if (isQReg)
      Registers.push_back(std::pair<unsigned, SMLoc>(++Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
  return false;
}

// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  // As an extension (to match gas), support a plain D register or Q register
  // (without encosing curly braces) as a single or double entry list,
  // respectively.
  if (Parser.getTok().is(AsmToken::Identifier)) {
    int Reg = tryParseRegister();
    if (Reg == -1)
      return MatchOperand_NoMatch;
    SMLoc E = Parser.getTok().getLoc();
    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg)) {
      Operands.push_back(ARMOperand::CreateVectorList(Reg, 1, S, E));
      return MatchOperand_Success;
    }
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      Operands.push_back(ARMOperand::CreateVectorList(Reg, 2, S, E));
      return MatchOperand_Success;
    }
    Error(S, "vector register expected");
    return MatchOperand_ParseFail;
  }

  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }
  unsigned Count = 1;
  unsigned FirstReg = Reg;
  // The list is of D registers, but we also allow Q regs and just interpret
  // them as the two D sub-registers.
  if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
    FirstReg = Reg = getDRegFromQReg(Reg);
    ++Reg;
    ++Count;
  }

  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1) {
        Error(EndLoc, "register expected");
        return MatchOperand_ParseFail;
      }
      // Allow Q regs and just interpret them as the two D sub-registers.
      if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(EndReg))
        EndReg = getDRegFromQReg(EndReg) + 1;
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!ARMMCRegisterClasses[ARM::DPRRegClassID].contains(EndReg)) {
        Error(EndLoc, "invalid register in register list");
        return MatchOperand_ParseFail;
      }
      // Ranges must go from low to high.
      if (Reg > EndReg) {
        Error(EndLoc, "bad range in register list");
        return MatchOperand_ParseFail;
      }

      // Add all the registers in the range to the register list.
      Count += EndReg - Reg;
      Reg = EndReg;
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    //
    // The list is of D registers, but we also allow Q regs and just interpret
    // them as the two D sub-registers.
    if (ARMMCRegisterClasses[ARM::QPRRegClassID].contains(Reg)) {
      Reg = getDRegFromQReg(Reg);
      if (Reg != OldReg + 1) {
        Error(RegLoc, "non-contiguous register range");
        return MatchOperand_ParseFail;
      }
      ++Reg;
      Count += 2;
      continue;
    }
    // Normal D register. Just check that it's contiguous and keep going.
    if (Reg != OldReg + 1) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }
    ++Count;
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef OptStr = Tok.getString();

  // Accept both the architectural names and the older aliases
  // (sh/shst/un/unst).
  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
    .Case("sy",    ARM_MB::SY)
    .Case("st",    ARM_MB::ST)
    .Case("sh",    ARM_MB::ISH)
    .Case("ish",   ARM_MB::ISH)
    .Case("shst",  ARM_MB::ISHST)
    .Case("ishst", ARM_MB::ISHST)
    .Case("nsh",   ARM_MB::NSH)
    .Case("un",    ARM_MB::NSH)
    .Case("nshst", ARM_MB::NSHST)
    .Case("unst",  ARM_MB::NSHST)
    .Case("osh",   ARM_MB::OSH)
    .Case("oshst", ARM_MB::OSHST)
    .Default(~0U);

  if (Opt == ~0U)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
  return MatchOperand_Success;
}

/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
2638ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2639parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2640 SMLoc S = Parser.getTok().getLoc(); 2641 const AsmToken &Tok = Parser.getTok(); 2642 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2643 StringRef IFlagsStr = Tok.getString(); 2644 2645 // An iflags string of "none" is interpreted to mean that none of the AIF 2646 // bits are set. Not a terribly useful instruction, but a valid encoding. 2647 unsigned IFlags = 0; 2648 if (IFlagsStr != "none") { 2649 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2650 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2651 .Case("a", ARM_PROC::A) 2652 .Case("i", ARM_PROC::I) 2653 .Case("f", ARM_PROC::F) 2654 .Default(~0U); 2655 2656 // If some specific iflag is already set, it means that some letter is 2657 // present more than once, this is not acceptable. 2658 if (Flag == ~0U || (IFlags & Flag)) 2659 return MatchOperand_NoMatch; 2660 2661 IFlags |= Flag; 2662 } 2663 } 2664 2665 Parser.Lex(); // Eat identifier token. 2666 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2667 return MatchOperand_Success; 2668} 2669 2670/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
2671ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2672parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2673 SMLoc S = Parser.getTok().getLoc(); 2674 const AsmToken &Tok = Parser.getTok(); 2675 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2676 StringRef Mask = Tok.getString(); 2677 2678 if (isMClass()) { 2679 // See ARMv6-M 10.1.1 2680 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2681 .Case("apsr", 0) 2682 .Case("iapsr", 1) 2683 .Case("eapsr", 2) 2684 .Case("xpsr", 3) 2685 .Case("ipsr", 5) 2686 .Case("epsr", 6) 2687 .Case("iepsr", 7) 2688 .Case("msp", 8) 2689 .Case("psp", 9) 2690 .Case("primask", 16) 2691 .Case("basepri", 17) 2692 .Case("basepri_max", 18) 2693 .Case("faultmask", 19) 2694 .Case("control", 20) 2695 .Default(~0U); 2696 2697 if (FlagsVal == ~0U) 2698 return MatchOperand_NoMatch; 2699 2700 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2701 // basepri, basepri_max and faultmask only valid for V7m. 2702 return MatchOperand_NoMatch; 2703 2704 Parser.Lex(); // Eat identifier token. 
2705 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2706 return MatchOperand_Success; 2707 } 2708 2709 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2710 size_t Start = 0, Next = Mask.find('_'); 2711 StringRef Flags = ""; 2712 std::string SpecReg = Mask.slice(Start, Next).lower(); 2713 if (Next != StringRef::npos) 2714 Flags = Mask.slice(Next+1, Mask.size()); 2715 2716 // FlagsVal contains the complete mask: 2717 // 3-0: Mask 2718 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2719 unsigned FlagsVal = 0; 2720 2721 if (SpecReg == "apsr") { 2722 FlagsVal = StringSwitch<unsigned>(Flags) 2723 .Case("nzcvq", 0x8) // same as CPSR_f 2724 .Case("g", 0x4) // same as CPSR_s 2725 .Case("nzcvqg", 0xc) // same as CPSR_fs 2726 .Default(~0U); 2727 2728 if (FlagsVal == ~0U) { 2729 if (!Flags.empty()) 2730 return MatchOperand_NoMatch; 2731 else 2732 FlagsVal = 8; // No flag 2733 } 2734 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2735 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2736 Flags = "fc"; 2737 for (int i = 0, e = Flags.size(); i != e; ++i) { 2738 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2739 .Case("c", 1) 2740 .Case("x", 2) 2741 .Case("s", 4) 2742 .Case("f", 8) 2743 .Default(~0U); 2744 2745 // If some specific flag is already set, it means that some letter is 2746 // present more than once, this is not acceptable. 2747 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2748 return MatchOperand_NoMatch; 2749 FlagsVal |= Flag; 2750 } 2751 } else // No match for special register. 2752 return MatchOperand_NoMatch; 2753 2754 // Special register without flags is NOT equivalent to "fc" flags. 2755 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2756 // two lines would enable gas compatibility at the expense of breaking 2757 // round-tripping. 
2758 // 2759 // if (!FlagsVal) 2760 // FlagsVal = 0x9; 2761 2762 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2763 if (SpecReg == "spsr") 2764 FlagsVal |= 16; 2765 2766 Parser.Lex(); // Eat identifier token. 2767 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2768 return MatchOperand_Success; 2769} 2770 2771ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2772parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2773 int Low, int High) { 2774 const AsmToken &Tok = Parser.getTok(); 2775 if (Tok.isNot(AsmToken::Identifier)) { 2776 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2777 return MatchOperand_ParseFail; 2778 } 2779 StringRef ShiftName = Tok.getString(); 2780 std::string LowerOp = Op.lower(); 2781 std::string UpperOp = Op.upper(); 2782 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2783 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2784 return MatchOperand_ParseFail; 2785 } 2786 Parser.Lex(); // Eat shift type token. 2787 2788 // There must be a '#' and a shift amount. 2789 if (Parser.getTok().isNot(AsmToken::Hash)) { 2790 Error(Parser.getTok().getLoc(), "'#' expected"); 2791 return MatchOperand_ParseFail; 2792 } 2793 Parser.Lex(); // Eat hash token. 
2794 2795 const MCExpr *ShiftAmount; 2796 SMLoc Loc = Parser.getTok().getLoc(); 2797 if (getParser().ParseExpression(ShiftAmount)) { 2798 Error(Loc, "illegal expression"); 2799 return MatchOperand_ParseFail; 2800 } 2801 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2802 if (!CE) { 2803 Error(Loc, "constant expression expected"); 2804 return MatchOperand_ParseFail; 2805 } 2806 int Val = CE->getValue(); 2807 if (Val < Low || Val > High) { 2808 Error(Loc, "immediate value out of range"); 2809 return MatchOperand_ParseFail; 2810 } 2811 2812 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2813 2814 return MatchOperand_Success; 2815} 2816 2817ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2818parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2819 const AsmToken &Tok = Parser.getTok(); 2820 SMLoc S = Tok.getLoc(); 2821 if (Tok.isNot(AsmToken::Identifier)) { 2822 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2823 return MatchOperand_ParseFail; 2824 } 2825 int Val = StringSwitch<int>(Tok.getString()) 2826 .Case("be", 1) 2827 .Case("le", 0) 2828 .Default(-1); 2829 Parser.Lex(); // Eat the token. 2830 2831 if (Val == -1) { 2832 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2833 return MatchOperand_ParseFail; 2834 } 2835 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2836 getContext()), 2837 S, Parser.getTok().getLoc())); 2838 return MatchOperand_Success; 2839} 2840 2841/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2842/// instructions. Legal values are: 2843/// lsl #n 'n' in [0,31] 2844/// asr #n 'n' in [1,32] 2845/// n == 32 encoded as n == 0. 
2846ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2847parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2848 const AsmToken &Tok = Parser.getTok(); 2849 SMLoc S = Tok.getLoc(); 2850 if (Tok.isNot(AsmToken::Identifier)) { 2851 Error(S, "shift operator 'asr' or 'lsl' expected"); 2852 return MatchOperand_ParseFail; 2853 } 2854 StringRef ShiftName = Tok.getString(); 2855 bool isASR; 2856 if (ShiftName == "lsl" || ShiftName == "LSL") 2857 isASR = false; 2858 else if (ShiftName == "asr" || ShiftName == "ASR") 2859 isASR = true; 2860 else { 2861 Error(S, "shift operator 'asr' or 'lsl' expected"); 2862 return MatchOperand_ParseFail; 2863 } 2864 Parser.Lex(); // Eat the operator. 2865 2866 // A '#' and a shift amount. 2867 if (Parser.getTok().isNot(AsmToken::Hash)) { 2868 Error(Parser.getTok().getLoc(), "'#' expected"); 2869 return MatchOperand_ParseFail; 2870 } 2871 Parser.Lex(); // Eat hash token. 2872 2873 const MCExpr *ShiftAmount; 2874 SMLoc E = Parser.getTok().getLoc(); 2875 if (getParser().ParseExpression(ShiftAmount)) { 2876 Error(E, "malformed shift expression"); 2877 return MatchOperand_ParseFail; 2878 } 2879 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2880 if (!CE) { 2881 Error(E, "shift amount must be an immediate"); 2882 return MatchOperand_ParseFail; 2883 } 2884 2885 int64_t Val = CE->getValue(); 2886 if (isASR) { 2887 // Shift amount must be in [1,32] 2888 if (Val < 1 || Val > 32) { 2889 Error(E, "'asr' shift amount must be in range [1,32]"); 2890 return MatchOperand_ParseFail; 2891 } 2892 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2893 if (isThumb() && Val == 32) { 2894 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2895 return MatchOperand_ParseFail; 2896 } 2897 if (Val == 32) Val = 0; 2898 } else { 2899 // Shift amount must be in [1,32] 2900 if (Val < 0 || Val > 31) { 2901 Error(E, "'lsr' shift amount must be in range [0,31]"); 2902 return MatchOperand_ParseFail; 2903 } 2904 } 2905 2906 E = Parser.getTok().getLoc(); 2907 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2908 2909 return MatchOperand_Success; 2910} 2911 2912/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2913/// of instructions. Legal values are: 2914/// ror #n 'n' in {0, 8, 16, 24} 2915ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2916parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2917 const AsmToken &Tok = Parser.getTok(); 2918 SMLoc S = Tok.getLoc(); 2919 if (Tok.isNot(AsmToken::Identifier)) 2920 return MatchOperand_NoMatch; 2921 StringRef ShiftName = Tok.getString(); 2922 if (ShiftName != "ror" && ShiftName != "ROR") 2923 return MatchOperand_NoMatch; 2924 Parser.Lex(); // Eat the operator. 2925 2926 // A '#' and a rotate amount. 2927 if (Parser.getTok().isNot(AsmToken::Hash)) { 2928 Error(Parser.getTok().getLoc(), "'#' expected"); 2929 return MatchOperand_ParseFail; 2930 } 2931 Parser.Lex(); // Eat hash token. 2932 2933 const MCExpr *ShiftAmount; 2934 SMLoc E = Parser.getTok().getLoc(); 2935 if (getParser().ParseExpression(ShiftAmount)) { 2936 Error(E, "malformed rotate expression"); 2937 return MatchOperand_ParseFail; 2938 } 2939 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2940 if (!CE) { 2941 Error(E, "rotate amount must be an immediate"); 2942 return MatchOperand_ParseFail; 2943 } 2944 2945 int64_t Val = CE->getValue(); 2946 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2947 // normally, zero is represented in asm by omitting the rotate operand 2948 // entirely. 
2949 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2950 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2951 return MatchOperand_ParseFail; 2952 } 2953 2954 E = Parser.getTok().getLoc(); 2955 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2956 2957 return MatchOperand_Success; 2958} 2959 2960ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2961parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2962 SMLoc S = Parser.getTok().getLoc(); 2963 // The bitfield descriptor is really two operands, the LSB and the width. 2964 if (Parser.getTok().isNot(AsmToken::Hash)) { 2965 Error(Parser.getTok().getLoc(), "'#' expected"); 2966 return MatchOperand_ParseFail; 2967 } 2968 Parser.Lex(); // Eat hash token. 2969 2970 const MCExpr *LSBExpr; 2971 SMLoc E = Parser.getTok().getLoc(); 2972 if (getParser().ParseExpression(LSBExpr)) { 2973 Error(E, "malformed immediate expression"); 2974 return MatchOperand_ParseFail; 2975 } 2976 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2977 if (!CE) { 2978 Error(E, "'lsb' operand must be an immediate"); 2979 return MatchOperand_ParseFail; 2980 } 2981 2982 int64_t LSB = CE->getValue(); 2983 // The LSB must be in the range [0,31] 2984 if (LSB < 0 || LSB > 31) { 2985 Error(E, "'lsb' operand must be in the range [0,31]"); 2986 return MatchOperand_ParseFail; 2987 } 2988 E = Parser.getTok().getLoc(); 2989 2990 // Expect another immediate operand. 2991 if (Parser.getTok().isNot(AsmToken::Comma)) { 2992 Error(Parser.getTok().getLoc(), "too few operands"); 2993 return MatchOperand_ParseFail; 2994 } 2995 Parser.Lex(); // Eat hash token. 2996 if (Parser.getTok().isNot(AsmToken::Hash)) { 2997 Error(Parser.getTok().getLoc(), "'#' expected"); 2998 return MatchOperand_ParseFail; 2999 } 3000 Parser.Lex(); // Eat hash token. 
3001 3002 const MCExpr *WidthExpr; 3003 if (getParser().ParseExpression(WidthExpr)) { 3004 Error(E, "malformed immediate expression"); 3005 return MatchOperand_ParseFail; 3006 } 3007 CE = dyn_cast<MCConstantExpr>(WidthExpr); 3008 if (!CE) { 3009 Error(E, "'width' operand must be an immediate"); 3010 return MatchOperand_ParseFail; 3011 } 3012 3013 int64_t Width = CE->getValue(); 3014 // The LSB must be in the range [1,32-lsb] 3015 if (Width < 1 || Width > 32 - LSB) { 3016 Error(E, "'width' operand must be in the range [1,32-lsb]"); 3017 return MatchOperand_ParseFail; 3018 } 3019 E = Parser.getTok().getLoc(); 3020 3021 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 3022 3023 return MatchOperand_Success; 3024} 3025 3026ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3027parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3028 // Check for a post-index addressing register operand. Specifically: 3029 // postidx_reg := '+' register {, shift} 3030 // | '-' register {, shift} 3031 // | register {, shift} 3032 3033 // This method must return MatchOperand_NoMatch without consuming any tokens 3034 // in the case where there is no match, as other alternatives take other 3035 // parse methods. 3036 AsmToken Tok = Parser.getTok(); 3037 SMLoc S = Tok.getLoc(); 3038 bool haveEaten = false; 3039 bool isAdd = true; 3040 int Reg = -1; 3041 if (Tok.is(AsmToken::Plus)) { 3042 Parser.Lex(); // Eat the '+' token. 3043 haveEaten = true; 3044 } else if (Tok.is(AsmToken::Minus)) { 3045 Parser.Lex(); // Eat the '-' token. 
3046 isAdd = false; 3047 haveEaten = true; 3048 } 3049 if (Parser.getTok().is(AsmToken::Identifier)) 3050 Reg = tryParseRegister(); 3051 if (Reg == -1) { 3052 if (!haveEaten) 3053 return MatchOperand_NoMatch; 3054 Error(Parser.getTok().getLoc(), "register expected"); 3055 return MatchOperand_ParseFail; 3056 } 3057 SMLoc E = Parser.getTok().getLoc(); 3058 3059 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 3060 unsigned ShiftImm = 0; 3061 if (Parser.getTok().is(AsmToken::Comma)) { 3062 Parser.Lex(); // Eat the ','. 3063 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 3064 return MatchOperand_ParseFail; 3065 } 3066 3067 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 3068 ShiftImm, S, E)); 3069 3070 return MatchOperand_Success; 3071} 3072 3073ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3074parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3075 // Check for a post-index addressing register operand. Specifically: 3076 // am3offset := '+' register 3077 // | '-' register 3078 // | register 3079 // | # imm 3080 // | # + imm 3081 // | # - imm 3082 3083 // This method must return MatchOperand_NoMatch without consuming any tokens 3084 // in the case where there is no match, as other alternatives take other 3085 // parse methods. 3086 AsmToken Tok = Parser.getTok(); 3087 SMLoc S = Tok.getLoc(); 3088 3089 // Do immediates first, as we always parse those if we have a '#'. 3090 if (Parser.getTok().is(AsmToken::Hash)) { 3091 Parser.Lex(); // Eat the '#'. 3092 // Explicitly look for a '-', as we need to encode negative zero 3093 // differently. 
3094 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3095 const MCExpr *Offset; 3096 if (getParser().ParseExpression(Offset)) 3097 return MatchOperand_ParseFail; 3098 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3099 if (!CE) { 3100 Error(S, "constant expression expected"); 3101 return MatchOperand_ParseFail; 3102 } 3103 SMLoc E = Tok.getLoc(); 3104 // Negative zero is encoded as the flag value INT32_MIN. 3105 int32_t Val = CE->getValue(); 3106 if (isNegative && Val == 0) 3107 Val = INT32_MIN; 3108 3109 Operands.push_back( 3110 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3111 3112 return MatchOperand_Success; 3113 } 3114 3115 3116 bool haveEaten = false; 3117 bool isAdd = true; 3118 int Reg = -1; 3119 if (Tok.is(AsmToken::Plus)) { 3120 Parser.Lex(); // Eat the '+' token. 3121 haveEaten = true; 3122 } else if (Tok.is(AsmToken::Minus)) { 3123 Parser.Lex(); // Eat the '-' token. 3124 isAdd = false; 3125 haveEaten = true; 3126 } 3127 if (Parser.getTok().is(AsmToken::Identifier)) 3128 Reg = tryParseRegister(); 3129 if (Reg == -1) { 3130 if (!haveEaten) 3131 return MatchOperand_NoMatch; 3132 Error(Parser.getTok().getLoc(), "register expected"); 3133 return MatchOperand_ParseFail; 3134 } 3135 SMLoc E = Parser.getTok().getLoc(); 3136 3137 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3138 0, S, E)); 3139 3140 return MatchOperand_Success; 3141} 3142 3143/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3144/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3145/// when they refer multiple MIOperands inside a single one. 3146bool ARMAsmParser:: 3147cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3148 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3149 // Rt, Rt2 3150 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3151 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3152 // Create a writeback register dummy placeholder. 
3153 Inst.addOperand(MCOperand::CreateReg(0)); 3154 // addr 3155 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3156 // pred 3157 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3158 return true; 3159} 3160 3161/// cvtT2StrdPre - Convert parsed operands to MCInst. 3162/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3163/// when they refer multiple MIOperands inside a single one. 3164bool ARMAsmParser:: 3165cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3166 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3167 // Create a writeback register dummy placeholder. 3168 Inst.addOperand(MCOperand::CreateReg(0)); 3169 // Rt, Rt2 3170 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3171 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3172 // addr 3173 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3174 // pred 3175 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3176 return true; 3177} 3178 3179/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3180/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3181/// when they refer multiple MIOperands inside a single one. 3182bool ARMAsmParser:: 3183cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3184 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3185 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3186 3187 // Create a writeback register dummy placeholder. 3188 Inst.addOperand(MCOperand::CreateImm(0)); 3189 3190 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3191 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3192 return true; 3193} 3194 3195/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3196/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3197/// when they refer multiple MIOperands inside a single one. 
3198bool ARMAsmParser:: 3199cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3200 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3201 // Create a writeback register dummy placeholder. 3202 Inst.addOperand(MCOperand::CreateImm(0)); 3203 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3204 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3205 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3206 return true; 3207} 3208 3209/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3210/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3211/// when they refer multiple MIOperands inside a single one. 3212bool ARMAsmParser:: 3213cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3214 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3215 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3216 3217 // Create a writeback register dummy placeholder. 3218 Inst.addOperand(MCOperand::CreateImm(0)); 3219 3220 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3221 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3222 return true; 3223} 3224 3225/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3226/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3227/// when they refer multiple MIOperands inside a single one. 3228bool ARMAsmParser:: 3229cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3230 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3231 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3232 3233 // Create a writeback register dummy placeholder. 3234 Inst.addOperand(MCOperand::CreateImm(0)); 3235 3236 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3237 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3238 return true; 3239} 3240 3241 3242/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3243/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3244/// when they refer multiple MIOperands inside a single one. 3245bool ARMAsmParser:: 3246cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3247 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3248 // Create a writeback register dummy placeholder. 3249 Inst.addOperand(MCOperand::CreateImm(0)); 3250 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3251 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3252 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3253 return true; 3254} 3255 3256/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3257/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3258/// when they refer multiple MIOperands inside a single one. 3259bool ARMAsmParser:: 3260cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3261 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3262 // Create a writeback register dummy placeholder. 3263 Inst.addOperand(MCOperand::CreateImm(0)); 3264 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3265 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3266 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3267 return true; 3268} 3269 3270/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3271/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3272/// when they refer multiple MIOperands inside a single one. 3273bool ARMAsmParser:: 3274cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3275 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3276 // Create a writeback register dummy placeholder. 
3277 Inst.addOperand(MCOperand::CreateImm(0)); 3278 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3279 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3280 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3281 return true; 3282} 3283 3284/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3285/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3286/// when they refer multiple MIOperands inside a single one. 3287bool ARMAsmParser:: 3288cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3289 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3290 // Rt 3291 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3292 // Create a writeback register dummy placeholder. 3293 Inst.addOperand(MCOperand::CreateImm(0)); 3294 // addr 3295 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3296 // offset 3297 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3298 // pred 3299 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3300 return true; 3301} 3302 3303/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3304/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3305/// when they refer multiple MIOperands inside a single one. 3306bool ARMAsmParser:: 3307cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3308 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3309 // Rt 3310 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3311 // Create a writeback register dummy placeholder. 3312 Inst.addOperand(MCOperand::CreateImm(0)); 3313 // addr 3314 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3315 // offset 3316 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3317 // pred 3318 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3319 return true; 3320} 3321 3322/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3323/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3324/// when they refer multiple MIOperands inside a single one. 3325bool ARMAsmParser:: 3326cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3327 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3328 // Create a writeback register dummy placeholder. 3329 Inst.addOperand(MCOperand::CreateImm(0)); 3330 // Rt 3331 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3332 // addr 3333 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3334 // offset 3335 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3336 // pred 3337 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3338 return true; 3339} 3340 3341/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3342/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3343/// when they refer multiple MIOperands inside a single one. 3344bool ARMAsmParser:: 3345cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3346 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3347 // Create a writeback register dummy placeholder. 3348 Inst.addOperand(MCOperand::CreateImm(0)); 3349 // Rt 3350 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3351 // addr 3352 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3353 // offset 3354 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3355 // pred 3356 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3357 return true; 3358} 3359 3360/// cvtLdrdPre - Convert parsed operands to MCInst. 3361/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3362/// when they refer multiple MIOperands inside a single one. 
3363bool ARMAsmParser:: 3364cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3365 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3366 // Rt, Rt2 3367 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3368 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3369 // Create a writeback register dummy placeholder. 3370 Inst.addOperand(MCOperand::CreateImm(0)); 3371 // addr 3372 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3373 // pred 3374 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3375 return true; 3376} 3377 3378/// cvtStrdPre - Convert parsed operands to MCInst. 3379/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3380/// when they refer multiple MIOperands inside a single one. 3381bool ARMAsmParser:: 3382cvtStrdPre(MCInst &Inst, unsigned Opcode, 3383 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3384 // Create a writeback register dummy placeholder. 3385 Inst.addOperand(MCOperand::CreateImm(0)); 3386 // Rt, Rt2 3387 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3388 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3389 // addr 3390 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3391 // pred 3392 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3393 return true; 3394} 3395 3396/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3397/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3398/// when they refer multiple MIOperands inside a single one. 3399bool ARMAsmParser:: 3400cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3401 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3402 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3403 // Create a writeback register dummy placeholder. 
3404 Inst.addOperand(MCOperand::CreateImm(0)); 3405 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3406 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3407 return true; 3408} 3409 3410/// cvtThumbMultiple- Convert parsed operands to MCInst. 3411/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3412/// when they refer multiple MIOperands inside a single one. 3413bool ARMAsmParser:: 3414cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3415 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3416 // The second source operand must be the same register as the destination 3417 // operand. 3418 if (Operands.size() == 6 && 3419 (((ARMOperand*)Operands[3])->getReg() != 3420 ((ARMOperand*)Operands[5])->getReg()) && 3421 (((ARMOperand*)Operands[3])->getReg() != 3422 ((ARMOperand*)Operands[4])->getReg())) { 3423 Error(Operands[3]->getStartLoc(), 3424 "destination register must match source register"); 3425 return false; 3426 } 3427 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3428 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3429 // If we have a three-operand form, make sure to set Rn to be the operand 3430 // that isn't the same as Rd. 3431 unsigned RegOp = 4; 3432 if (Operands.size() == 6 && 3433 ((ARMOperand*)Operands[4])->getReg() == 3434 ((ARMOperand*)Operands[3])->getReg()) 3435 RegOp = 5; 3436 ((ARMOperand*)Operands[RegOp])->addRegOperands(Inst, 1); 3437 Inst.addOperand(Inst.getOperand(0)); 3438 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3439 3440 return true; 3441} 3442 3443bool ARMAsmParser:: 3444cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 3445 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3446 // Vd 3447 ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1); 3448 // Create a writeback register dummy placeholder. 
3449 Inst.addOperand(MCOperand::CreateImm(0)); 3450 // Vn 3451 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3452 // pred 3453 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3454 return true; 3455} 3456 3457bool ARMAsmParser:: 3458cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 3459 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3460 // Vd 3461 ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1); 3462 // Create a writeback register dummy placeholder. 3463 Inst.addOperand(MCOperand::CreateImm(0)); 3464 // Vn 3465 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3466 // Vm 3467 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3468 // pred 3469 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3470 return true; 3471} 3472 3473bool ARMAsmParser:: 3474cvtVSTwbFixed(MCInst &Inst, unsigned Opcode, 3475 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3476 // Create a writeback register dummy placeholder. 3477 Inst.addOperand(MCOperand::CreateImm(0)); 3478 // Vn 3479 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3480 // Vt 3481 ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1); 3482 // pred 3483 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3484 return true; 3485} 3486 3487bool ARMAsmParser:: 3488cvtVSTwbRegister(MCInst &Inst, unsigned Opcode, 3489 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3490 // Create a writeback register dummy placeholder. 3491 Inst.addOperand(MCOperand::CreateImm(0)); 3492 // Vn 3493 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3494 // Vm 3495 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3496 // Vt 3497 ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1); 3498 // pred 3499 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3500 return true; 3501} 3502 3503/// Parse an ARM memory expression, return false if successful else return true 3504/// or an error. 
The first token must be a '[' when called. 3505bool ARMAsmParser:: 3506parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3507 SMLoc S, E; 3508 assert(Parser.getTok().is(AsmToken::LBrac) && 3509 "Token is not a Left Bracket"); 3510 S = Parser.getTok().getLoc(); 3511 Parser.Lex(); // Eat left bracket token. 3512 3513 const AsmToken &BaseRegTok = Parser.getTok(); 3514 int BaseRegNum = tryParseRegister(); 3515 if (BaseRegNum == -1) 3516 return Error(BaseRegTok.getLoc(), "register expected"); 3517 3518 // The next token must either be a comma or a closing bracket. 3519 const AsmToken &Tok = Parser.getTok(); 3520 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3521 return Error(Tok.getLoc(), "malformed memory operand"); 3522 3523 if (Tok.is(AsmToken::RBrac)) { 3524 E = Tok.getLoc(); 3525 Parser.Lex(); // Eat right bracket token. 3526 3527 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3528 0, 0, false, S, E)); 3529 3530 // If there's a pre-indexing writeback marker, '!', just add it as a token 3531 // operand. It's rather odd, but syntactically valid. 3532 if (Parser.getTok().is(AsmToken::Exclaim)) { 3533 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3534 Parser.Lex(); // Eat the '!'. 3535 } 3536 3537 return false; 3538 } 3539 3540 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3541 Parser.Lex(); // Eat the comma. 3542 3543 // If we have a ':', it's an alignment specifier. 3544 if (Parser.getTok().is(AsmToken::Colon)) { 3545 Parser.Lex(); // Eat the ':'. 3546 E = Parser.getTok().getLoc(); 3547 3548 const MCExpr *Expr; 3549 if (getParser().ParseExpression(Expr)) 3550 return true; 3551 3552 // The expression has to be a constant. Memory references with relocations 3553 // don't come through here, as they use the <label> forms of the relevant 3554 // instructions. 
3555 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3556 if (!CE) 3557 return Error (E, "constant expression expected"); 3558 3559 unsigned Align = 0; 3560 switch (CE->getValue()) { 3561 default: 3562 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3563 case 64: Align = 8; break; 3564 case 128: Align = 16; break; 3565 case 256: Align = 32; break; 3566 } 3567 3568 // Now we should have the closing ']' 3569 E = Parser.getTok().getLoc(); 3570 if (Parser.getTok().isNot(AsmToken::RBrac)) 3571 return Error(E, "']' expected"); 3572 Parser.Lex(); // Eat right bracket token. 3573 3574 // Don't worry about range checking the value here. That's handled by 3575 // the is*() predicates. 3576 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3577 ARM_AM::no_shift, 0, Align, 3578 false, S, E)); 3579 3580 // If there's a pre-indexing writeback marker, '!', just add it as a token 3581 // operand. 3582 if (Parser.getTok().is(AsmToken::Exclaim)) { 3583 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3584 Parser.Lex(); // Eat the '!'. 3585 } 3586 3587 return false; 3588 } 3589 3590 // If we have a '#', it's an immediate offset, else assume it's a register 3591 // offset. Be friendly and also accept a plain integer (without a leading 3592 // hash) for gas compatibility. 3593 if (Parser.getTok().is(AsmToken::Hash) || 3594 Parser.getTok().is(AsmToken::Integer)) { 3595 if (Parser.getTok().is(AsmToken::Hash)) 3596 Parser.Lex(); // Eat the '#'. 3597 E = Parser.getTok().getLoc(); 3598 3599 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3600 const MCExpr *Offset; 3601 if (getParser().ParseExpression(Offset)) 3602 return true; 3603 3604 // The expression has to be a constant. Memory references with relocations 3605 // don't come through here, as they use the <label> forms of the relevant 3606 // instructions. 
3607 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3608 if (!CE) 3609 return Error (E, "constant expression expected"); 3610 3611 // If the constant was #-0, represent it as INT32_MIN. 3612 int32_t Val = CE->getValue(); 3613 if (isNegative && Val == 0) 3614 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3615 3616 // Now we should have the closing ']' 3617 E = Parser.getTok().getLoc(); 3618 if (Parser.getTok().isNot(AsmToken::RBrac)) 3619 return Error(E, "']' expected"); 3620 Parser.Lex(); // Eat right bracket token. 3621 3622 // Don't worry about range checking the value here. That's handled by 3623 // the is*() predicates. 3624 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3625 ARM_AM::no_shift, 0, 0, 3626 false, S, E)); 3627 3628 // If there's a pre-indexing writeback marker, '!', just add it as a token 3629 // operand. 3630 if (Parser.getTok().is(AsmToken::Exclaim)) { 3631 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3632 Parser.Lex(); // Eat the '!'. 3633 } 3634 3635 return false; 3636 } 3637 3638 // The register offset is optionally preceded by a '+' or '-' 3639 bool isNegative = false; 3640 if (Parser.getTok().is(AsmToken::Minus)) { 3641 isNegative = true; 3642 Parser.Lex(); // Eat the '-'. 3643 } else if (Parser.getTok().is(AsmToken::Plus)) { 3644 // Nothing to do. 3645 Parser.Lex(); // Eat the '+'. 3646 } 3647 3648 E = Parser.getTok().getLoc(); 3649 int OffsetRegNum = tryParseRegister(); 3650 if (OffsetRegNum == -1) 3651 return Error(E, "register expected"); 3652 3653 // If there's a shift operator, handle it. 3654 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3655 unsigned ShiftImm = 0; 3656 if (Parser.getTok().is(AsmToken::Comma)) { 3657 Parser.Lex(); // Eat the ','. 
3658 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3659 return true; 3660 } 3661 3662 // Now we should have the closing ']' 3663 E = Parser.getTok().getLoc(); 3664 if (Parser.getTok().isNot(AsmToken::RBrac)) 3665 return Error(E, "']' expected"); 3666 Parser.Lex(); // Eat right bracket token. 3667 3668 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3669 ShiftType, ShiftImm, 0, isNegative, 3670 S, E)); 3671 3672 // If there's a pre-indexing writeback marker, '!', just add it as a token 3673 // operand. 3674 if (Parser.getTok().is(AsmToken::Exclaim)) { 3675 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3676 Parser.Lex(); // Eat the '!'. 3677 } 3678 3679 return false; 3680} 3681 3682/// parseMemRegOffsetShift - one of these two: 3683/// ( lsl | lsr | asr | ror ) , # shift_amount 3684/// rrx 3685/// return true if it parses a shift otherwise it returns false. 3686bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3687 unsigned &Amount) { 3688 SMLoc Loc = Parser.getTok().getLoc(); 3689 const AsmToken &Tok = Parser.getTok(); 3690 if (Tok.isNot(AsmToken::Identifier)) 3691 return true; 3692 StringRef ShiftName = Tok.getString(); 3693 if (ShiftName == "lsl" || ShiftName == "LSL") 3694 St = ARM_AM::lsl; 3695 else if (ShiftName == "lsr" || ShiftName == "LSR") 3696 St = ARM_AM::lsr; 3697 else if (ShiftName == "asr" || ShiftName == "ASR") 3698 St = ARM_AM::asr; 3699 else if (ShiftName == "ror" || ShiftName == "ROR") 3700 St = ARM_AM::ror; 3701 else if (ShiftName == "rrx" || ShiftName == "RRX") 3702 St = ARM_AM::rrx; 3703 else 3704 return Error(Loc, "illegal shift operator"); 3705 Parser.Lex(); // Eat shift type token. 3706 3707 // rrx stands alone. 3708 Amount = 0; 3709 if (St != ARM_AM::rrx) { 3710 Loc = Parser.getTok().getLoc(); 3711 // A '#' and a shift amount. 
3712 const AsmToken &HashTok = Parser.getTok(); 3713 if (HashTok.isNot(AsmToken::Hash)) 3714 return Error(HashTok.getLoc(), "'#' expected"); 3715 Parser.Lex(); // Eat hash token. 3716 3717 const MCExpr *Expr; 3718 if (getParser().ParseExpression(Expr)) 3719 return true; 3720 // Range check the immediate. 3721 // lsl, ror: 0 <= imm <= 31 3722 // lsr, asr: 0 <= imm <= 32 3723 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3724 if (!CE) 3725 return Error(Loc, "shift amount must be an immediate"); 3726 int64_t Imm = CE->getValue(); 3727 if (Imm < 0 || 3728 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3729 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3730 return Error(Loc, "immediate shift value out of range"); 3731 Amount = Imm; 3732 } 3733 3734 return false; 3735} 3736 3737/// parseFPImm - A floating point immediate expression operand. 3738ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3739parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3740 SMLoc S = Parser.getTok().getLoc(); 3741 3742 if (Parser.getTok().isNot(AsmToken::Hash)) 3743 return MatchOperand_NoMatch; 3744 3745 // Disambiguate the VMOV forms that can accept an FP immediate. 3746 // vmov.f32 <sreg>, #imm 3747 // vmov.f64 <dreg>, #imm 3748 // vmov.f32 <dreg>, #imm @ vector f32x2 3749 // vmov.f32 <qreg>, #imm @ vector f32x4 3750 // 3751 // There are also the NEON VMOV instructions which expect an 3752 // integer constant. Make sure we don't try to parse an FPImm 3753 // for these: 3754 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3755 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3756 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3757 TyOp->getToken() != ".f64")) 3758 return MatchOperand_NoMatch; 3759 3760 Parser.Lex(); // Eat the '#'. 3761 3762 // Handle negation, as that still comes through as a separate token. 
3763 bool isNegative = false; 3764 if (Parser.getTok().is(AsmToken::Minus)) { 3765 isNegative = true; 3766 Parser.Lex(); 3767 } 3768 const AsmToken &Tok = Parser.getTok(); 3769 if (Tok.is(AsmToken::Real)) { 3770 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3771 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3772 // If we had a '-' in front, toggle the sign bit. 3773 IntVal ^= (uint64_t)isNegative << 63; 3774 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3775 Parser.Lex(); // Eat the token. 3776 if (Val == -1) { 3777 TokError("floating point value out of range"); 3778 return MatchOperand_ParseFail; 3779 } 3780 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3781 return MatchOperand_Success; 3782 } 3783 if (Tok.is(AsmToken::Integer)) { 3784 int64_t Val = Tok.getIntVal(); 3785 Parser.Lex(); // Eat the token. 3786 if (Val > 255 || Val < 0) { 3787 TokError("encoded floating point value out of range"); 3788 return MatchOperand_ParseFail; 3789 } 3790 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3791 return MatchOperand_Success; 3792 } 3793 3794 TokError("invalid floating point immediate"); 3795 return MatchOperand_ParseFail; 3796} 3797/// Parse a arm instruction operand. For now this parses the operand regardless 3798/// of the mnemonic. 3799bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3800 StringRef Mnemonic) { 3801 SMLoc S, E; 3802 3803 // Check if the current operand has a custom associated parser, if so, try to 3804 // custom parse the operand, or fallback to the general approach. 3805 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3806 if (ResTy == MatchOperand_Success) 3807 return false; 3808 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3809 // there was a match, but an error occurred, in which case, just return that 3810 // the operand parsing failed. 
3811 if (ResTy == MatchOperand_ParseFail) 3812 return true; 3813 3814 switch (getLexer().getKind()) { 3815 default: 3816 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3817 return true; 3818 case AsmToken::Identifier: { 3819 // If this is VMRS, check for the apsr_nzcv operand. 3820 if (!tryParseRegisterWithWriteBack(Operands)) 3821 return false; 3822 int Res = tryParseShiftRegister(Operands); 3823 if (Res == 0) // success 3824 return false; 3825 else if (Res == -1) // irrecoverable error 3826 return true; 3827 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3828 S = Parser.getTok().getLoc(); 3829 Parser.Lex(); 3830 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3831 return false; 3832 } 3833 3834 // Fall though for the Identifier case that is not a register or a 3835 // special name. 3836 } 3837 case AsmToken::LParen: // parenthesized expressions like (_strcmp-4) 3838 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3839 case AsmToken::String: // quoted label names. 3840 case AsmToken::Dot: { // . as a branch target 3841 // This was not a register so parse other operands that start with an 3842 // identifier (like labels) as expressions and create them as immediates. 3843 const MCExpr *IdVal; 3844 S = Parser.getTok().getLoc(); 3845 if (getParser().ParseExpression(IdVal)) 3846 return true; 3847 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3848 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3849 return false; 3850 } 3851 case AsmToken::LBrac: 3852 return parseMemory(Operands); 3853 case AsmToken::LCurly: 3854 return parseRegisterList(Operands); 3855 case AsmToken::Hash: { 3856 // #42 -> immediate. 
3857 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3858 S = Parser.getTok().getLoc(); 3859 Parser.Lex(); 3860 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3861 const MCExpr *ImmVal; 3862 if (getParser().ParseExpression(ImmVal)) 3863 return true; 3864 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3865 if (CE) { 3866 int32_t Val = CE->getValue(); 3867 if (isNegative && Val == 0) 3868 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3869 } 3870 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3871 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3872 return false; 3873 } 3874 case AsmToken::Colon: { 3875 // ":lower16:" and ":upper16:" expression prefixes 3876 // FIXME: Check it's an expression prefix, 3877 // e.g. (FOO - :lower16:BAR) isn't legal. 3878 ARMMCExpr::VariantKind RefKind; 3879 if (parsePrefix(RefKind)) 3880 return true; 3881 3882 const MCExpr *SubExprVal; 3883 if (getParser().ParseExpression(SubExprVal)) 3884 return true; 3885 3886 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3887 getContext()); 3888 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3889 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3890 return false; 3891 } 3892 } 3893} 3894 3895// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3896// :lower16: and :upper16:. 
// Consumes the leading ':', the prefix identifier, and the trailing ':'.
// On success RefKind holds the matched variant; on error a diagnostic is
// emitted and true is returned.
bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) {
  RefKind = ARMMCExpr::VK_ARM_None;

  // :lower16: and :upper16: modifiers
  assert(getLexer().is(AsmToken::Colon) && "expected a :");
  Parser.Lex(); // Eat ':'

  if (getLexer().isNot(AsmToken::Identifier)) {
    Error(Parser.getTok().getLoc(), "expected prefix identifier in operand");
    return true;
  }

  StringRef IDVal = Parser.getTok().getIdentifier();
  if (IDVal == "lower16") {
    RefKind = ARMMCExpr::VK_ARM_LO16;
  } else if (IDVal == "upper16") {
    RefKind = ARMMCExpr::VK_ARM_HI16;
  } else {
    Error(Parser.getTok().getLoc(), "unexpected prefix in operand");
    return true;
  }
  Parser.Lex(); // Eat the identifier.

  if (getLexer().isNot(AsmToken::Colon)) {
    Error(Parser.getTok().getLoc(), "unexpected token after prefix");
    return true;
  }
  Parser.Lex(); // Eat the last ':'
  return false;
}

/// \brief Given a mnemonic, split out possible predication code and carry
/// setting letters to form a canonical mnemonic and flags.
///
/// Returns the stripped (canonical) mnemonic. The out-parameters receive the
/// predication code (ARMCC::AL if none), whether an 's' carry-setting suffix
/// was present, the "cps" interrupt-mode suffix (0 if none), and the raw IT
/// condition mask string for "it" (e.g. "te" from "itte").
//
// FIXME: Would be nice to autogen this.
// FIXME: This is a bit of a maze of special cases.
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  // (These all happen to end in two letters that would otherwise be parsed
  // as a condition code, e.g. "teq" -> "t" + "eq".)
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq"   || Mnemonic == "vceq"   || Mnemonic == "svc"   ||
      Mnemonic == "mls"   || Mnemonic == "smmls"  || Mnemonic == "vcls"  ||
      Mnemonic == "vmls"  || Mnemonic == "vnmls"  || Mnemonic == "vacge" ||
      Mnemonic == "vcge"  || Mnemonic == "vclt"   || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt"  || Mnemonic == "vcle"   || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal"  || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  // (StringRef::substr clamps an out-of-range start index, so mnemonics
  // shorter than two characters simply yield no match here.)
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have a interrupt mode operand which is glued into
  // the mnemonic. Check if this is the case, split it and parse the imod op
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
4027void ARMAsmParser:: 4028getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 4029 bool &CanAcceptPredicationCode) { 4030 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 4031 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 4032 Mnemonic == "add" || Mnemonic == "adc" || 4033 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 4034 Mnemonic == "orr" || Mnemonic == "mvn" || 4035 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 4036 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 4037 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 4038 Mnemonic == "mla" || Mnemonic == "smlal" || 4039 Mnemonic == "umlal" || Mnemonic == "umull"))) { 4040 CanAcceptCarrySet = true; 4041 } else 4042 CanAcceptCarrySet = false; 4043 4044 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 4045 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 4046 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 4047 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 4048 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 4049 (Mnemonic == "clrex" && !isThumb()) || 4050 (Mnemonic == "nop" && isThumbOne()) || 4051 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 4052 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 4053 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 4054 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 4055 !isThumb()) || 4056 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 4057 CanAcceptPredicationCode = false; 4058 } else 4059 CanAcceptPredicationCode = true; 4060 4061 if (isThumb()) { 4062 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 4063 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 4064 CanAcceptPredicationCode = false; 4065 } 4066} 4067 4068bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 4069 
SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4070 // FIXME: This is all horribly hacky. We really need a better way to deal 4071 // with optional operands like this in the matcher table. 4072 4073 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 4074 // another does not. Specifically, the MOVW instruction does not. So we 4075 // special case it here and remove the defaulted (non-setting) cc_out 4076 // operand if that's the instruction we're trying to match. 4077 // 4078 // We do this as post-processing of the explicit operands rather than just 4079 // conditionally adding the cc_out in the first place because we need 4080 // to check the type of the parsed immediate operand. 4081 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 4082 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 4083 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 4084 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4085 return true; 4086 4087 // Register-register 'add' for thumb does not have a cc_out operand 4088 // when there are only two register operands. 4089 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 4090 static_cast<ARMOperand*>(Operands[3])->isReg() && 4091 static_cast<ARMOperand*>(Operands[4])->isReg() && 4092 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4093 return true; 4094 // Register-register 'add' for thumb does not have a cc_out operand 4095 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 4096 // have to check the immediate range here since Thumb2 has a variant 4097 // that can handle a different range and has a cc_out operand. 
4098 if (((isThumb() && Mnemonic == "add") || 4099 (isThumbTwo() && Mnemonic == "sub")) && 4100 Operands.size() == 6 && 4101 static_cast<ARMOperand*>(Operands[3])->isReg() && 4102 static_cast<ARMOperand*>(Operands[4])->isReg() && 4103 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 4104 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4105 (static_cast<ARMOperand*>(Operands[5])->isReg() || 4106 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 4107 return true; 4108 // For Thumb2, add/sub immediate does not have a cc_out operand for the 4109 // imm0_4095 variant. That's the least-preferred variant when 4110 // selecting via the generic "add" mnemonic, so to know that we 4111 // should remove the cc_out operand, we have to explicitly check that 4112 // it's not one of the other variants. Ugh. 4113 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 4114 Operands.size() == 6 && 4115 static_cast<ARMOperand*>(Operands[3])->isReg() && 4116 static_cast<ARMOperand*>(Operands[4])->isReg() && 4117 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4118 // Nest conditions rather than one big 'if' statement for readability. 4119 // 4120 // If either register is a high reg, it's either one of the SP 4121 // variants (handled above) or a 32-bit encoding, so we just 4122 // check against T3. 4123 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4124 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 4125 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 4126 return false; 4127 // If both registers are low, we're in an IT block, and the immediate is 4128 // in range, we should use encoding T1 instead, which has a cc_out. 
4129 if (inITBlock() && 4130 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 4131 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 4132 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 4133 return false; 4134 4135 // Otherwise, we use encoding T4, which does not have a cc_out 4136 // operand. 4137 return true; 4138 } 4139 4140 // The thumb2 multiply instruction doesn't have a CCOut register, so 4141 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 4142 // use the 16-bit encoding or not. 4143 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4144 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4145 static_cast<ARMOperand*>(Operands[3])->isReg() && 4146 static_cast<ARMOperand*>(Operands[4])->isReg() && 4147 static_cast<ARMOperand*>(Operands[5])->isReg() && 4148 // If the registers aren't low regs, the destination reg isn't the 4149 // same as one of the source regs, or the cc_out operand is zero 4150 // outside of an IT block, we have to use the 32-bit encoding, so 4151 // remove the cc_out operand. 4152 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4153 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4154 !isARMLowRegister(static_cast<ARMOperand*>(Operands[5])->getReg()) || 4155 !inITBlock() || 4156 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4157 static_cast<ARMOperand*>(Operands[5])->getReg() && 4158 static_cast<ARMOperand*>(Operands[3])->getReg() != 4159 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4160 return true; 4161 4162 // Also check the 'mul' syntax variant that doesn't specify an explicit 4163 // destination register. 
4164 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 5 && 4165 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4166 static_cast<ARMOperand*>(Operands[3])->isReg() && 4167 static_cast<ARMOperand*>(Operands[4])->isReg() && 4168 // If the registers aren't low regs or the cc_out operand is zero 4169 // outside of an IT block, we have to use the 32-bit encoding, so 4170 // remove the cc_out operand. 4171 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4172 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4173 !inITBlock())) 4174 return true; 4175 4176 4177 4178 // Register-register 'add/sub' for thumb does not have a cc_out operand 4179 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4180 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4181 // right, this will result in better diagnostics (which operand is off) 4182 // anyway. 4183 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4184 (Operands.size() == 5 || Operands.size() == 6) && 4185 static_cast<ARMOperand*>(Operands[3])->isReg() && 4186 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4187 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4188 return true; 4189 4190 return false; 4191} 4192 4193static bool isDataTypeToken(StringRef Tok) { 4194 return Tok == ".8" || Tok == ".16" || Tok == ".32" || Tok == ".64" || 4195 Tok == ".i8" || Tok == ".i16" || Tok == ".i32" || Tok == ".i64" || 4196 Tok == ".u8" || Tok == ".u16" || Tok == ".u32" || Tok == ".u64" || 4197 Tok == ".s8" || Tok == ".s16" || Tok == ".s32" || Tok == ".s64" || 4198 Tok == ".p8" || Tok == ".p16" || Tok == ".f32" || Tok == ".f64" || 4199 Tok == ".f" || Tok == ".d"; 4200} 4201 4202// FIXME: This bit should probably be handled via an explicit match class 4203// in the .td files that matches the suffix instead of having it be 4204// a literal string token the way it is now. 
// Returns true for mnemonics whose datatype suffix is completely ignored.
// The suffix string itself (DT) is currently unused; only the mnemonic
// family (vldm/vstm) decides.
static bool doesIgnoreDataTypeSuffix(StringRef Mnemonic, StringRef DT) {
  return Mnemonic.startswith("vldm") || Mnemonic.startswith("vstm");
}

/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as its bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    // At most three extra conditions ('t'/'e') after the leading one.
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the mask from the last condition to the first; the sentinel
    // '1' bit marking the end of the block shifts down as entries are added.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    // The condition code suffix sits after the mnemonic and, if present,
    // the 's' carry-setting letter (hence "+ CarrySetting").
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic (the '.'-separated suffixes).
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // Some NEON instructions have an optional datatype suffix that is
    // completely ignored. Check for that.
    if (isDataTypeToken(ExtraToken) &&
        doesIgnoreDataTypeSuffix(Mnemonic, ExtraToken))
      continue;

    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex(); // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
      Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
4423static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 4424 unsigned HiReg, bool &containsReg) { 4425 containsReg = false; 4426 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4427 unsigned OpReg = Inst.getOperand(i).getReg(); 4428 if (OpReg == Reg) 4429 containsReg = true; 4430 // Anything other than a low register isn't legal here. 4431 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4432 return true; 4433 } 4434 return false; 4435} 4436 4437// Check if the specified regisgter is in the register list of the inst, 4438// starting at the indicated operand number. 4439static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4440 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4441 unsigned OpReg = Inst.getOperand(i).getReg(); 4442 if (OpReg == Reg) 4443 return true; 4444 } 4445 return false; 4446} 4447 4448// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4449// the ARMInsts array) instead. Getting that here requires awkward 4450// API changes, though. Better way? 4451namespace llvm { 4452extern const MCInstrDesc ARMInsts[]; 4453} 4454static const MCInstrDesc &getInstDesc(unsigned Opcode) { 4455 return ARMInsts[Opcode]; 4456} 4457 4458// FIXME: We would really like to be able to tablegen'erate this. 4459bool ARMAsmParser:: 4460validateInstruction(MCInst &Inst, 4461 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4462 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4463 SMLoc Loc = Operands[0]->getStartLoc(); 4464 // Check the IT block state first. 4465 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4466 // being allowed in IT blocks, but not being predicable. It just always 4467 // executes. 
4468 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4469 unsigned bit = 1; 4470 if (ITState.FirstCond) 4471 ITState.FirstCond = false; 4472 else 4473 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4474 // The instruction must be predicable. 4475 if (!MCID.isPredicable()) 4476 return Error(Loc, "instructions in IT block must be predicable"); 4477 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4478 unsigned ITCond = bit ? ITState.Cond : 4479 ARMCC::getOppositeCondition(ITState.Cond); 4480 if (Cond != ITCond) { 4481 // Find the condition code Operand to get its SMLoc information. 4482 SMLoc CondLoc; 4483 for (unsigned i = 1; i < Operands.size(); ++i) 4484 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4485 CondLoc = Operands[i]->getStartLoc(); 4486 return Error(CondLoc, "incorrect condition in IT block; got '" + 4487 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4488 "', but expected '" + 4489 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4490 } 4491 // Check for non-'al' condition codes outside of the IT block. 4492 } else if (isThumbTwo() && MCID.isPredicable() && 4493 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4494 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4495 Inst.getOpcode() != ARM::t2B) 4496 return Error(Loc, "predicated instructions must be in IT block"); 4497 4498 switch (Inst.getOpcode()) { 4499 case ARM::LDRD: 4500 case ARM::LDRD_PRE: 4501 case ARM::LDRD_POST: 4502 case ARM::LDREXD: { 4503 // Rt2 must be Rt + 1. 4504 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4505 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4506 if (Rt2 != Rt + 1) 4507 return Error(Operands[3]->getStartLoc(), 4508 "destination operands must be sequential"); 4509 return false; 4510 } 4511 case ARM::STRD: { 4512 // Rt2 must be Rt + 1. 
4513 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4514 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4515 if (Rt2 != Rt + 1) 4516 return Error(Operands[3]->getStartLoc(), 4517 "source operands must be sequential"); 4518 return false; 4519 } 4520 case ARM::STRD_PRE: 4521 case ARM::STRD_POST: 4522 case ARM::STREXD: { 4523 // Rt2 must be Rt + 1. 4524 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4525 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4526 if (Rt2 != Rt + 1) 4527 return Error(Operands[3]->getStartLoc(), 4528 "source operands must be sequential"); 4529 return false; 4530 } 4531 case ARM::SBFX: 4532 case ARM::UBFX: { 4533 // width must be in range [1, 32-lsb] 4534 unsigned lsb = Inst.getOperand(2).getImm(); 4535 unsigned widthm1 = Inst.getOperand(3).getImm(); 4536 if (widthm1 >= 32 - lsb) 4537 return Error(Operands[5]->getStartLoc(), 4538 "bitfield width must be in range [1,32-lsb]"); 4539 return false; 4540 } 4541 case ARM::tLDMIA: { 4542 // If we're parsing Thumb2, the .w variant is available and handles 4543 // most cases that are normally illegal for a Thumb1 LDM 4544 // instruction. We'll make the transformation in processInstruction() 4545 // if necessary. 4546 // 4547 // Thumb LDM instructions are writeback iff the base register is not 4548 // in the register list. 4549 unsigned Rn = Inst.getOperand(0).getReg(); 4550 bool hasWritebackToken = 4551 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4552 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4553 bool listContainsBase; 4554 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4555 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4556 "registers must be in range r0-r7"); 4557 // If we should have writeback, then there should be a '!' token. 
4558 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4559 return Error(Operands[2]->getStartLoc(), 4560 "writeback operator '!' expected"); 4561 // If we should not have writeback, there must not be a '!'. This is 4562 // true even for the 32-bit wide encodings. 4563 if (listContainsBase && hasWritebackToken) 4564 return Error(Operands[3]->getStartLoc(), 4565 "writeback operator '!' not allowed when base register " 4566 "in register list"); 4567 4568 break; 4569 } 4570 case ARM::t2LDMIA_UPD: { 4571 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4572 return Error(Operands[4]->getStartLoc(), 4573 "writeback operator '!' not allowed when base register " 4574 "in register list"); 4575 break; 4576 } 4577 // Like for ldm/stm, push and pop have hi-reg handling version in Thumb2, 4578 // so only issue a diagnostic for thumb1. The instructions will be 4579 // switched to the t2 encodings in processInstruction() if necessary. 4580 case ARM::tPOP: { 4581 bool listContainsBase; 4582 if (checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase) && 4583 !isThumbTwo()) 4584 return Error(Operands[2]->getStartLoc(), 4585 "registers must be in range r0-r7 or pc"); 4586 break; 4587 } 4588 case ARM::tPUSH: { 4589 bool listContainsBase; 4590 if (checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase) && 4591 !isThumbTwo()) 4592 return Error(Operands[2]->getStartLoc(), 4593 "registers must be in range r0-r7 or lr"); 4594 break; 4595 } 4596 case ARM::tSTMIA_UPD: { 4597 bool listContainsBase; 4598 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4599 return Error(Operands[4]->getStartLoc(), 4600 "registers must be in range r0-r7"); 4601 break; 4602 } 4603 } 4604 4605 return false; 4606} 4607 4608bool ARMAsmParser:: 4609processInstruction(MCInst &Inst, 4610 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4611 switch (Inst.getOpcode()) { 4612 // Handle the MOV complex aliases. 
4613 case ARM::ASRr: 4614 case ARM::LSRr: 4615 case ARM::LSLr: 4616 case ARM::RORr: { 4617 ARM_AM::ShiftOpc ShiftTy; 4618 switch(Inst.getOpcode()) { 4619 default: llvm_unreachable("unexpected opcode!"); 4620 case ARM::ASRr: ShiftTy = ARM_AM::asr; break; 4621 case ARM::LSRr: ShiftTy = ARM_AM::lsr; break; 4622 case ARM::LSLr: ShiftTy = ARM_AM::lsl; break; 4623 case ARM::RORr: ShiftTy = ARM_AM::ror; break; 4624 } 4625 // A shift by zero is a plain MOVr, not a MOVsi. 4626 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, 0); 4627 MCInst TmpInst; 4628 TmpInst.setOpcode(ARM::MOVsr); 4629 TmpInst.addOperand(Inst.getOperand(0)); // Rd 4630 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4631 TmpInst.addOperand(Inst.getOperand(2)); // Rm 4632 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 4633 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 4634 TmpInst.addOperand(Inst.getOperand(4)); 4635 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 4636 Inst = TmpInst; 4637 return true; 4638 } 4639 case ARM::ASRi: 4640 case ARM::LSRi: 4641 case ARM::LSLi: 4642 case ARM::RORi: { 4643 ARM_AM::ShiftOpc ShiftTy; 4644 switch(Inst.getOpcode()) { 4645 default: llvm_unreachable("unexpected opcode!"); 4646 case ARM::ASRi: ShiftTy = ARM_AM::asr; break; 4647 case ARM::LSRi: ShiftTy = ARM_AM::lsr; break; 4648 case ARM::LSLi: ShiftTy = ARM_AM::lsl; break; 4649 case ARM::RORi: ShiftTy = ARM_AM::ror; break; 4650 } 4651 // A shift by zero is a plain MOVr, not a MOVsi. 4652 unsigned Amt = Inst.getOperand(2).getImm(); 4653 unsigned Opc = Amt == 0 ? 
ARM::MOVr : ARM::MOVsi; 4654 unsigned Shifter = ARM_AM::getSORegOpc(ShiftTy, Amt); 4655 MCInst TmpInst; 4656 TmpInst.setOpcode(Opc); 4657 TmpInst.addOperand(Inst.getOperand(0)); // Rd 4658 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4659 if (Opc == ARM::MOVsi) 4660 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 4661 TmpInst.addOperand(Inst.getOperand(3)); // CondCode 4662 TmpInst.addOperand(Inst.getOperand(4)); 4663 TmpInst.addOperand(Inst.getOperand(5)); // cc_out 4664 Inst = TmpInst; 4665 return true; 4666 } 4667 case ARM::RRXi: { 4668 unsigned Shifter = ARM_AM::getSORegOpc(ARM_AM::rrx, 0); 4669 MCInst TmpInst; 4670 TmpInst.setOpcode(ARM::MOVsi); 4671 TmpInst.addOperand(Inst.getOperand(0)); // Rd 4672 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4673 TmpInst.addOperand(MCOperand::CreateImm(Shifter)); // Shift value and ty 4674 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4675 TmpInst.addOperand(Inst.getOperand(3)); 4676 TmpInst.addOperand(Inst.getOperand(4)); // cc_out 4677 Inst = TmpInst; 4678 return true; 4679 } 4680 case ARM::t2LDMIA_UPD: { 4681 // If this is a load of a single register, then we should use 4682 // a post-indexed LDR instruction instead, per the ARM ARM. 4683 if (Inst.getNumOperands() != 5) 4684 return false; 4685 MCInst TmpInst; 4686 TmpInst.setOpcode(ARM::t2LDR_POST); 4687 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4688 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4689 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4690 TmpInst.addOperand(MCOperand::CreateImm(4)); 4691 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4692 TmpInst.addOperand(Inst.getOperand(3)); 4693 Inst = TmpInst; 4694 return true; 4695 } 4696 case ARM::t2STMDB_UPD: { 4697 // If this is a store of a single register, then we should use 4698 // a pre-indexed STR instruction instead, per the ARM ARM. 
4699 if (Inst.getNumOperands() != 5) 4700 return false; 4701 MCInst TmpInst; 4702 TmpInst.setOpcode(ARM::t2STR_PRE); 4703 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4704 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4705 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4706 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4707 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4708 TmpInst.addOperand(Inst.getOperand(3)); 4709 Inst = TmpInst; 4710 return true; 4711 } 4712 case ARM::LDMIA_UPD: 4713 // If this is a load of a single register via a 'pop', then we should use 4714 // a post-indexed LDR instruction instead, per the ARM ARM. 4715 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4716 Inst.getNumOperands() == 5) { 4717 MCInst TmpInst; 4718 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4719 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4720 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4721 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4722 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4723 TmpInst.addOperand(MCOperand::CreateImm(4)); 4724 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4725 TmpInst.addOperand(Inst.getOperand(3)); 4726 Inst = TmpInst; 4727 return true; 4728 } 4729 break; 4730 case ARM::STMDB_UPD: 4731 // If this is a store of a single register via a 'push', then we should use 4732 // a pre-indexed STR instruction instead, per the ARM ARM. 
4733 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4734 Inst.getNumOperands() == 5) { 4735 MCInst TmpInst; 4736 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4737 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4738 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4739 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4740 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4741 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4742 TmpInst.addOperand(Inst.getOperand(3)); 4743 Inst = TmpInst; 4744 } 4745 break; 4746 case ARM::tADDi8: 4747 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4748 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4749 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4750 // to encoding T1 if <Rd> is omitted." 4751 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 4752 Inst.setOpcode(ARM::tADDi3); 4753 return true; 4754 } 4755 break; 4756 case ARM::tSUBi8: 4757 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4758 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4759 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4760 // to encoding T1 if <Rd> is omitted." 4761 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) { 4762 Inst.setOpcode(ARM::tSUBi3); 4763 return true; 4764 } 4765 break; 4766 case ARM::tB: 4767 // A Thumb conditional branch outside of an IT block is a tBcc. 4768 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) { 4769 Inst.setOpcode(ARM::tBcc); 4770 return true; 4771 } 4772 break; 4773 case ARM::t2B: 4774 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 4775 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()){ 4776 Inst.setOpcode(ARM::t2Bcc); 4777 return true; 4778 } 4779 break; 4780 case ARM::t2Bcc: 4781 // If the conditional is AL or we're in an IT block, we really want t2B. 
4782 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) { 4783 Inst.setOpcode(ARM::t2B); 4784 return true; 4785 } 4786 break; 4787 case ARM::tBcc: 4788 // If the conditional is AL, we really want tB. 4789 if (Inst.getOperand(1).getImm() == ARMCC::AL) { 4790 Inst.setOpcode(ARM::tB); 4791 return true; 4792 } 4793 break; 4794 case ARM::tLDMIA: { 4795 // If the register list contains any high registers, or if the writeback 4796 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4797 // instead if we're in Thumb2. Otherwise, this should have generated 4798 // an error in validateInstruction(). 4799 unsigned Rn = Inst.getOperand(0).getReg(); 4800 bool hasWritebackToken = 4801 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4802 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4803 bool listContainsBase; 4804 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4805 (!listContainsBase && !hasWritebackToken) || 4806 (listContainsBase && hasWritebackToken)) { 4807 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4808 assert (isThumbTwo()); 4809 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4810 // If we're switching to the updating version, we need to insert 4811 // the writeback tied operand. 4812 if (hasWritebackToken) 4813 Inst.insert(Inst.begin(), 4814 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4815 return true; 4816 } 4817 break; 4818 } 4819 case ARM::tSTMIA_UPD: { 4820 // If the register list contains any high registers, we need to use 4821 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4822 // should have generated an error in validateInstruction(). 4823 unsigned Rn = Inst.getOperand(0).getReg(); 4824 bool listContainsBase; 4825 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4826 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 
4827 assert (isThumbTwo()); 4828 Inst.setOpcode(ARM::t2STMIA_UPD); 4829 return true; 4830 } 4831 break; 4832 } 4833 case ARM::tPOP: { 4834 bool listContainsBase; 4835 // If the register list contains any high registers, we need to use 4836 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4837 // should have generated an error in validateInstruction(). 4838 if (!checkLowRegisterList(Inst, 2, 0, ARM::PC, listContainsBase)) 4839 return false; 4840 assert (isThumbTwo()); 4841 Inst.setOpcode(ARM::t2LDMIA_UPD); 4842 // Add the base register and writeback operands. 4843 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4844 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4845 return true; 4846 } 4847 case ARM::tPUSH: { 4848 bool listContainsBase; 4849 if (!checkLowRegisterList(Inst, 2, 0, ARM::LR, listContainsBase)) 4850 return false; 4851 assert (isThumbTwo()); 4852 Inst.setOpcode(ARM::t2STMDB_UPD); 4853 // Add the base register and writeback operands. 4854 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4855 Inst.insert(Inst.begin(), MCOperand::CreateReg(ARM::SP)); 4856 return true; 4857 } 4858 case ARM::t2MOVi: { 4859 // If we can use the 16-bit encoding and the user didn't explicitly 4860 // request the 32-bit variant, transform it here. 4861 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4862 Inst.getOperand(1).getImm() <= 255 && 4863 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4864 Inst.getOperand(4).getReg() == ARM::CPSR) || 4865 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4866 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4867 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4868 // The operands aren't in the same order for tMOVi8... 
4869 MCInst TmpInst; 4870 TmpInst.setOpcode(ARM::tMOVi8); 4871 TmpInst.addOperand(Inst.getOperand(0)); 4872 TmpInst.addOperand(Inst.getOperand(4)); 4873 TmpInst.addOperand(Inst.getOperand(1)); 4874 TmpInst.addOperand(Inst.getOperand(2)); 4875 TmpInst.addOperand(Inst.getOperand(3)); 4876 Inst = TmpInst; 4877 return true; 4878 } 4879 break; 4880 } 4881 case ARM::t2MOVr: { 4882 // If we can use the 16-bit encoding and the user didn't explicitly 4883 // request the 32-bit variant, transform it here. 4884 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4885 isARMLowRegister(Inst.getOperand(1).getReg()) && 4886 Inst.getOperand(2).getImm() == ARMCC::AL && 4887 Inst.getOperand(4).getReg() == ARM::CPSR && 4888 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4889 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4890 // The operands aren't the same for tMOV[S]r... (no cc_out) 4891 MCInst TmpInst; 4892 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr); 4893 TmpInst.addOperand(Inst.getOperand(0)); 4894 TmpInst.addOperand(Inst.getOperand(1)); 4895 TmpInst.addOperand(Inst.getOperand(2)); 4896 TmpInst.addOperand(Inst.getOperand(3)); 4897 Inst = TmpInst; 4898 return true; 4899 } 4900 break; 4901 } 4902 case ARM::t2SXTH: 4903 case ARM::t2SXTB: 4904 case ARM::t2UXTH: 4905 case ARM::t2UXTB: { 4906 // If we can use the 16-bit encoding and the user didn't explicitly 4907 // request the 32-bit variant, transform it here. 
4908 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4909 isARMLowRegister(Inst.getOperand(1).getReg()) && 4910 Inst.getOperand(2).getImm() == 0 && 4911 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4912 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4913 unsigned NewOpc; 4914 switch (Inst.getOpcode()) { 4915 default: llvm_unreachable("Illegal opcode!"); 4916 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4917 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4918 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4919 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4920 } 4921 // The operands aren't the same for thumb1 (no rotate operand). 4922 MCInst TmpInst; 4923 TmpInst.setOpcode(NewOpc); 4924 TmpInst.addOperand(Inst.getOperand(0)); 4925 TmpInst.addOperand(Inst.getOperand(1)); 4926 TmpInst.addOperand(Inst.getOperand(3)); 4927 TmpInst.addOperand(Inst.getOperand(4)); 4928 Inst = TmpInst; 4929 return true; 4930 } 4931 break; 4932 } 4933 case ARM::t2IT: { 4934 // The mask bits for all but the first condition are represented as 4935 // the low bit of the condition code value implies 't'. We currently 4936 // always have 1 implies 't', so XOR toggle the bits if the low bit 4937 // of the condition code is zero. The encoding also expects the low 4938 // bit of the condition to be encoded as bit 4 of the mask operand, 4939 // so mask that in if needed 4940 MCOperand &MO = Inst.getOperand(1); 4941 unsigned Mask = MO.getImm(); 4942 unsigned OrigMask = Mask; 4943 unsigned TZ = CountTrailingZeros_32(Mask); 4944 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4945 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4946 for (unsigned i = 3; i != TZ; --i) 4947 Mask ^= 1 << i; 4948 } else 4949 Mask |= 0x10; 4950 MO.setImm(Mask); 4951 4952 // Set up the IT block state according to the IT instruction we just 4953 // matched. 
4954 assert(!inITBlock() && "nested IT blocks?!"); 4955 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4956 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4957 ITState.CurPosition = 0; 4958 ITState.FirstCond = true; 4959 break; 4960 } 4961 } 4962 return false; 4963} 4964 4965unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4966 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4967 // suffix depending on whether they're in an IT block or not. 4968 unsigned Opc = Inst.getOpcode(); 4969 const MCInstrDesc &MCID = getInstDesc(Opc); 4970 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4971 assert(MCID.hasOptionalDef() && 4972 "optionally flag setting instruction missing optional def operand"); 4973 assert(MCID.NumOperands == Inst.getNumOperands() && 4974 "operand count mismatch!"); 4975 // Find the optional-def operand (cc_out). 4976 unsigned OpNo; 4977 for (OpNo = 0; 4978 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4979 ++OpNo) 4980 ; 4981 // If we're parsing Thumb1, reject it completely. 4982 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4983 return Match_MnemonicFail; 4984 // If we're parsing Thumb2, which form is legal depends on whether we're 4985 // in an IT block. 4986 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4987 !inITBlock()) 4988 return Match_RequiresITBlock; 4989 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4990 inITBlock()) 4991 return Match_RequiresNotITBlock; 4992 } 4993 // Some high-register supporting Thumb1 encodings only allow both registers 4994 // to be from r0-r7 when in Thumb2. 4995 else if (Opc == ARM::tADDhirr && isThumbOne() && 4996 isARMLowRegister(Inst.getOperand(1).getReg()) && 4997 isARMLowRegister(Inst.getOperand(2).getReg())) 4998 return Match_RequiresThumb2; 4999 // Others only require ARMv6 or later. 
5000 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 5001 isARMLowRegister(Inst.getOperand(0).getReg()) && 5002 isARMLowRegister(Inst.getOperand(1).getReg())) 5003 return Match_RequiresV6; 5004 return Match_Success; 5005} 5006 5007bool ARMAsmParser:: 5008MatchAndEmitInstruction(SMLoc IDLoc, 5009 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 5010 MCStreamer &Out) { 5011 MCInst Inst; 5012 unsigned ErrorInfo; 5013 unsigned MatchResult; 5014 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 5015 switch (MatchResult) { 5016 default: break; 5017 case Match_Success: 5018 // Context sensitive operand constraints aren't handled by the matcher, 5019 // so check them here. 5020 if (validateInstruction(Inst, Operands)) { 5021 // Still progress the IT block, otherwise one wrong condition causes 5022 // nasty cascading errors. 5023 forwardITPosition(); 5024 return true; 5025 } 5026 5027 // Some instructions need post-processing to, for example, tweak which 5028 // encoding is selected. Loop on it while changes happen so the 5029 // individual transformations can chain off each other. E.g., 5030 // tPOP(r8)->t2LDMIA_UPD(sp,r8)->t2STR_POST(sp,r8) 5031 while (processInstruction(Inst, Operands)) 5032 ; 5033 5034 // Only move forward at the very end so that everything in validate 5035 // and process gets a consistent answer about whether we're in an IT 5036 // block. 
5037 forwardITPosition(); 5038 5039 Out.EmitInstruction(Inst); 5040 return false; 5041 case Match_MissingFeature: 5042 Error(IDLoc, "instruction requires a CPU feature not currently enabled"); 5043 return true; 5044 case Match_InvalidOperand: { 5045 SMLoc ErrorLoc = IDLoc; 5046 if (ErrorInfo != ~0U) { 5047 if (ErrorInfo >= Operands.size()) 5048 return Error(IDLoc, "too few operands for instruction"); 5049 5050 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 5051 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 5052 } 5053 5054 return Error(ErrorLoc, "invalid operand for instruction"); 5055 } 5056 case Match_MnemonicFail: 5057 return Error(IDLoc, "invalid instruction"); 5058 case Match_ConversionFail: 5059 // The converter function will have already emited a diagnostic. 5060 return true; 5061 case Match_RequiresNotITBlock: 5062 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 5063 case Match_RequiresITBlock: 5064 return Error(IDLoc, "instruction only valid inside IT block"); 5065 case Match_RequiresV6: 5066 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 5067 case Match_RequiresThumb2: 5068 return Error(IDLoc, "instruction variant requires Thumb2"); 5069 } 5070 5071 llvm_unreachable("Implement any new match types added!"); 5072 return true; 5073} 5074 5075/// parseDirective parses the arm specific directives 5076bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 5077 StringRef IDVal = DirectiveID.getIdentifier(); 5078 if (IDVal == ".word") 5079 return parseDirectiveWord(4, DirectiveID.getLoc()); 5080 else if (IDVal == ".thumb") 5081 return parseDirectiveThumb(DirectiveID.getLoc()); 5082 else if (IDVal == ".thumb_func") 5083 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 5084 else if (IDVal == ".code") 5085 return parseDirectiveCode(DirectiveID.getLoc()); 5086 else if (IDVal == ".syntax") 5087 return parseDirectiveSyntax(DirectiveID.getLoc()); 5088 return true; 5089} 5090 5091/// parseDirectiveWord 
5092/// ::= .word [ expression (, expression)* ] 5093bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 5094 if (getLexer().isNot(AsmToken::EndOfStatement)) { 5095 for (;;) { 5096 const MCExpr *Value; 5097 if (getParser().ParseExpression(Value)) 5098 return true; 5099 5100 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 5101 5102 if (getLexer().is(AsmToken::EndOfStatement)) 5103 break; 5104 5105 // FIXME: Improve diagnostic. 5106 if (getLexer().isNot(AsmToken::Comma)) 5107 return Error(L, "unexpected token in directive"); 5108 Parser.Lex(); 5109 } 5110 } 5111 5112 Parser.Lex(); 5113 return false; 5114} 5115 5116/// parseDirectiveThumb 5117/// ::= .thumb 5118bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 5119 if (getLexer().isNot(AsmToken::EndOfStatement)) 5120 return Error(L, "unexpected token in directive"); 5121 Parser.Lex(); 5122 5123 // TODO: set thumb mode 5124 // TODO: tell the MC streamer the mode 5125 // getParser().getStreamer().Emit???(); 5126 return false; 5127} 5128 5129/// parseDirectiveThumbFunc 5130/// ::= .thumbfunc symbol_name 5131bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 5132 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 5133 bool isMachO = MAI.hasSubsectionsViaSymbols(); 5134 StringRef Name; 5135 5136 // Darwin asm has function name after .thumb_func direction 5137 // ELF doesn't 5138 if (isMachO) { 5139 const AsmToken &Tok = Parser.getTok(); 5140 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 5141 return Error(L, "unexpected token in .thumb_func directive"); 5142 Name = Tok.getIdentifier(); 5143 Parser.Lex(); // Consume the identifier token. 
5144 } 5145 5146 if (getLexer().isNot(AsmToken::EndOfStatement)) 5147 return Error(L, "unexpected token in directive"); 5148 Parser.Lex(); 5149 5150 // FIXME: assuming function name will be the line following .thumb_func 5151 if (!isMachO) { 5152 Name = Parser.getTok().getIdentifier(); 5153 } 5154 5155 // Mark symbol as a thumb symbol. 5156 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 5157 getParser().getStreamer().EmitThumbFunc(Func); 5158 return false; 5159} 5160 5161/// parseDirectiveSyntax 5162/// ::= .syntax unified | divided 5163bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 5164 const AsmToken &Tok = Parser.getTok(); 5165 if (Tok.isNot(AsmToken::Identifier)) 5166 return Error(L, "unexpected token in .syntax directive"); 5167 StringRef Mode = Tok.getString(); 5168 if (Mode == "unified" || Mode == "UNIFIED") 5169 Parser.Lex(); 5170 else if (Mode == "divided" || Mode == "DIVIDED") 5171 return Error(L, "'.syntax divided' arm asssembly not supported"); 5172 else 5173 return Error(L, "unrecognized syntax mode in .syntax directive"); 5174 5175 if (getLexer().isNot(AsmToken::EndOfStatement)) 5176 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5177 Parser.Lex(); 5178 5179 // TODO tell the MC streamer the mode 5180 // getParser().getStreamer().Emit???(); 5181 return false; 5182} 5183 5184/// parseDirectiveCode 5185/// ::= .code 16 | 32 5186bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 5187 const AsmToken &Tok = Parser.getTok(); 5188 if (Tok.isNot(AsmToken::Integer)) 5189 return Error(L, "unexpected token in .code directive"); 5190 int64_t Val = Parser.getTok().getIntVal(); 5191 if (Val == 16) 5192 Parser.Lex(); 5193 else if (Val == 32) 5194 Parser.Lex(); 5195 else 5196 return Error(L, "invalid operand to .code directive"); 5197 5198 if (getLexer().isNot(AsmToken::EndOfStatement)) 5199 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 5200 Parser.Lex(); 5201 5202 if (Val == 16) { 5203 if 
(!isThumb()) 5204 SwitchMode(); 5205 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 5206 } else { 5207 if (isThumb()) 5208 SwitchMode(); 5209 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 5210 } 5211 5212 return false; 5213} 5214 5215extern "C" void LLVMInitializeARMAsmLexer(); 5216 5217/// Force static initialization. 5218extern "C" void LLVMInitializeARMAsmParser() { 5219 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 5220 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 5221 LLVMInitializeARMAsmLexer(); 5222} 5223 5224#define GET_REGISTER_MATCHER 5225#define GET_MATCHER_IMPLEMENTATION 5226#include "ARMGenAsmMatcher.inc" 5227