ARMAsmParser.cpp revision 12431329d617064d6e72dd040a58c1635cc261ab
1//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===// 2// 3// The LLVM Compiler Infrastructure 4// 5// This file is distributed under the University of Illinois Open Source 6// License. See LICENSE.TXT for details. 7// 8//===----------------------------------------------------------------------===// 9 10#include "MCTargetDesc/ARMBaseInfo.h" 11#include "MCTargetDesc/ARMAddressingModes.h" 12#include "MCTargetDesc/ARMMCExpr.h" 13#include "llvm/MC/MCParser/MCAsmLexer.h" 14#include "llvm/MC/MCParser/MCAsmParser.h" 15#include "llvm/MC/MCParser/MCParsedAsmOperand.h" 16#include "llvm/MC/MCAsmInfo.h" 17#include "llvm/MC/MCContext.h" 18#include "llvm/MC/MCStreamer.h" 19#include "llvm/MC/MCExpr.h" 20#include "llvm/MC/MCInst.h" 21#include "llvm/MC/MCInstrDesc.h" 22#include "llvm/MC/MCRegisterInfo.h" 23#include "llvm/MC/MCSubtargetInfo.h" 24#include "llvm/MC/MCTargetAsmParser.h" 25#include "llvm/Support/MathExtras.h" 26#include "llvm/Support/SourceMgr.h" 27#include "llvm/Support/TargetRegistry.h" 28#include "llvm/Support/raw_ostream.h" 29#include "llvm/ADT/BitVector.h" 30#include "llvm/ADT/OwningPtr.h" 31#include "llvm/ADT/STLExtras.h" 32#include "llvm/ADT/SmallVector.h" 33#include "llvm/ADT/StringExtras.h" 34#include "llvm/ADT/StringSwitch.h" 35#include "llvm/ADT/Twine.h" 36 37using namespace llvm; 38 39namespace { 40 41class ARMOperand; 42 43class ARMAsmParser : public MCTargetAsmParser { 44 MCSubtargetInfo &STI; 45 MCAsmParser &Parser; 46 47 struct { 48 ARMCC::CondCodes Cond; // Condition for IT block. 49 unsigned Mask:4; // Condition mask for instructions. 50 // Starting at first 1 (from lsb). 51 // '1' condition as indicated in IT. 52 // '0' inverse of condition (else). 53 // Count of instructions in IT block is 54 // 4 - trailingzeroes(mask) 55 56 bool FirstCond; // Explicit flag for when we're parsing the 57 // First instruction in the IT block. It's 58 // implied in the mask, so needs special 59 // handling. 
60 61 unsigned CurPosition; // Current position in parsing of IT 62 // block. In range [0,3]. Initialized 63 // according to count of instructions in block. 64 // ~0U if no active IT block. 65 } ITState; 66 bool inITBlock() { return ITState.CurPosition != ~0U;} 67 void forwardITPosition() { 68 if (!inITBlock()) return; 69 // Move to the next instruction in the IT block, if there is one. If not, 70 // mark the block as done. 71 unsigned TZ = CountTrailingZeros_32(ITState.Mask); 72 if (++ITState.CurPosition == 5 - TZ) 73 ITState.CurPosition = ~0U; // Done with the IT block after this. 74 } 75 76 77 MCAsmParser &getParser() const { return Parser; } 78 MCAsmLexer &getLexer() const { return Parser.getLexer(); } 79 80 void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); } 81 bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); } 82 83 int tryParseRegister(); 84 bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &); 85 int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &); 86 bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &); 87 bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &); 88 bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic); 89 bool parsePrefix(ARMMCExpr::VariantKind &RefKind); 90 bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType, 91 unsigned &ShiftAmount); 92 bool parseDirectiveWord(unsigned Size, SMLoc L); 93 bool parseDirectiveThumb(SMLoc L); 94 bool parseDirectiveThumbFunc(SMLoc L); 95 bool parseDirectiveCode(SMLoc L); 96 bool parseDirectiveSyntax(SMLoc L); 97 98 StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode, 99 bool &CarrySetting, unsigned &ProcessorIMod, 100 StringRef &ITMask); 101 void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 102 bool &CanAcceptPredicationCode); 103 104 bool isThumb() const { 105 // FIXME: Can tablegen auto-generate this? 
106 return (STI.getFeatureBits() & ARM::ModeThumb) != 0; 107 } 108 bool isThumbOne() const { 109 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0; 110 } 111 bool isThumbTwo() const { 112 return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2); 113 } 114 bool hasV6Ops() const { 115 return STI.getFeatureBits() & ARM::HasV6Ops; 116 } 117 bool hasV7Ops() const { 118 return STI.getFeatureBits() & ARM::HasV7Ops; 119 } 120 void SwitchMode() { 121 unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb)); 122 setAvailableFeatures(FB); 123 } 124 bool isMClass() const { 125 return STI.getFeatureBits() & ARM::FeatureMClass; 126 } 127 128 /// @name Auto-generated Match Functions 129 /// { 130 131#define GET_ASSEMBLER_HEADER 132#include "ARMGenAsmMatcher.inc" 133 134 /// } 135 136 OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&); 137 OperandMatchResultTy parseCoprocNumOperand( 138 SmallVectorImpl<MCParsedAsmOperand*>&); 139 OperandMatchResultTy parseCoprocRegOperand( 140 SmallVectorImpl<MCParsedAsmOperand*>&); 141 OperandMatchResultTy parseCoprocOptionOperand( 142 SmallVectorImpl<MCParsedAsmOperand*>&); 143 OperandMatchResultTy parseMemBarrierOptOperand( 144 SmallVectorImpl<MCParsedAsmOperand*>&); 145 OperandMatchResultTy parseProcIFlagsOperand( 146 SmallVectorImpl<MCParsedAsmOperand*>&); 147 OperandMatchResultTy parseMSRMaskOperand( 148 SmallVectorImpl<MCParsedAsmOperand*>&); 149 OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O, 150 StringRef Op, int Low, int High); 151 OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 152 return parsePKHImm(O, "lsl", 0, 31); 153 } 154 OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) { 155 return parsePKHImm(O, "asr", 1, 32); 156 } 157 OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&); 158 OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&); 159 
OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&); 160 OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&); 161 OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&); 162 OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&); 163 OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&); 164 OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&); 165 166 // Asm Match Converter Methods 167 bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 168 const SmallVectorImpl<MCParsedAsmOperand*> &); 169 bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 170 const SmallVectorImpl<MCParsedAsmOperand*> &); 171 bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 172 const SmallVectorImpl<MCParsedAsmOperand*> &); 173 bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 174 const SmallVectorImpl<MCParsedAsmOperand*> &); 175 bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 176 const SmallVectorImpl<MCParsedAsmOperand*> &); 177 bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 178 const SmallVectorImpl<MCParsedAsmOperand*> &); 179 bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 180 const SmallVectorImpl<MCParsedAsmOperand*> &); 181 bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 182 const SmallVectorImpl<MCParsedAsmOperand*> &); 183 bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 184 const SmallVectorImpl<MCParsedAsmOperand*> &); 185 bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 186 const SmallVectorImpl<MCParsedAsmOperand*> &); 187 bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 188 const SmallVectorImpl<MCParsedAsmOperand*> &); 189 bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 190 const SmallVectorImpl<MCParsedAsmOperand*> &); 191 bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 192 const 
SmallVectorImpl<MCParsedAsmOperand*> &); 193 bool cvtLdrdPre(MCInst &Inst, unsigned Opcode, 194 const SmallVectorImpl<MCParsedAsmOperand*> &); 195 bool cvtStrdPre(MCInst &Inst, unsigned Opcode, 196 const SmallVectorImpl<MCParsedAsmOperand*> &); 197 bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 198 const SmallVectorImpl<MCParsedAsmOperand*> &); 199 bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 200 const SmallVectorImpl<MCParsedAsmOperand*> &); 201 bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 202 const SmallVectorImpl<MCParsedAsmOperand*> &); 203 bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 204 const SmallVectorImpl<MCParsedAsmOperand*> &); 205 206 bool validateInstruction(MCInst &Inst, 207 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 208 void processInstruction(MCInst &Inst, 209 const SmallVectorImpl<MCParsedAsmOperand*> &Ops); 210 bool shouldOmitCCOutOperand(StringRef Mnemonic, 211 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 212 213public: 214 enum ARMMatchResultTy { 215 Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY, 216 Match_RequiresNotITBlock, 217 Match_RequiresV6, 218 Match_RequiresThumb2 219 }; 220 221 ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser) 222 : MCTargetAsmParser(), STI(_STI), Parser(_Parser) { 223 MCAsmParserExtension::Initialize(_Parser); 224 225 // Initialize the set of available features. 226 setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits())); 227 228 // Not in an ITBlock to start with. 
229 ITState.CurPosition = ~0U; 230 } 231 232 // Implementation of the MCTargetAsmParser interface: 233 bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc); 234 bool ParseInstruction(StringRef Name, SMLoc NameLoc, 235 SmallVectorImpl<MCParsedAsmOperand*> &Operands); 236 bool ParseDirective(AsmToken DirectiveID); 237 238 unsigned checkTargetMatchPredicate(MCInst &Inst); 239 240 bool MatchAndEmitInstruction(SMLoc IDLoc, 241 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 242 MCStreamer &Out); 243}; 244} // end anonymous namespace 245 246namespace { 247 248/// ARMOperand - Instances of this class represent a parsed ARM machine 249/// instruction. 250class ARMOperand : public MCParsedAsmOperand { 251 enum KindTy { 252 k_CondCode, 253 k_CCOut, 254 k_ITCondMask, 255 k_CoprocNum, 256 k_CoprocReg, 257 k_CoprocOption, 258 k_Immediate, 259 k_FPImmediate, 260 k_MemBarrierOpt, 261 k_Memory, 262 k_PostIndexRegister, 263 k_MSRMask, 264 k_ProcIFlags, 265 k_VectorIndex, 266 k_Register, 267 k_RegisterList, 268 k_DPRRegisterList, 269 k_SPRRegisterList, 270 k_VectorList, 271 k_ShiftedRegister, 272 k_ShiftedImmediate, 273 k_ShifterImmediate, 274 k_RotateImmediate, 275 k_BitfieldDescriptor, 276 k_Token 277 } Kind; 278 279 SMLoc StartLoc, EndLoc; 280 SmallVector<unsigned, 8> Registers; 281 282 union { 283 struct { 284 ARMCC::CondCodes Val; 285 } CC; 286 287 struct { 288 unsigned Val; 289 } Cop; 290 291 struct { 292 unsigned Val; 293 } CoprocOption; 294 295 struct { 296 unsigned Mask:4; 297 } ITMask; 298 299 struct { 300 ARM_MB::MemBOpt Val; 301 } MBOpt; 302 303 struct { 304 ARM_PROC::IFlags Val; 305 } IFlags; 306 307 struct { 308 unsigned Val; 309 } MMask; 310 311 struct { 312 const char *Data; 313 unsigned Length; 314 } Tok; 315 316 struct { 317 unsigned RegNum; 318 } Reg; 319 320 // A vector register list is a sequential list of 1 to 4 registers. 
321 struct { 322 unsigned RegNum; 323 unsigned Count; 324 } VectorList; 325 326 struct { 327 unsigned Val; 328 } VectorIndex; 329 330 struct { 331 const MCExpr *Val; 332 } Imm; 333 334 struct { 335 unsigned Val; // encoded 8-bit representation 336 } FPImm; 337 338 /// Combined record for all forms of ARM address expressions. 339 struct { 340 unsigned BaseRegNum; 341 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 342 // was specified. 343 const MCConstantExpr *OffsetImm; // Offset immediate value 344 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 345 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 346 unsigned ShiftImm; // shift for OffsetReg. 347 unsigned Alignment; // 0 = no alignment specified 348 // n = alignment in bytes (8, 16, or 32) 349 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 350 } Memory; 351 352 struct { 353 unsigned RegNum; 354 bool isAdd; 355 ARM_AM::ShiftOpc ShiftTy; 356 unsigned ShiftImm; 357 } PostIdxReg; 358 359 struct { 360 bool isASR; 361 unsigned Imm; 362 } ShifterImm; 363 struct { 364 ARM_AM::ShiftOpc ShiftTy; 365 unsigned SrcReg; 366 unsigned ShiftReg; 367 unsigned ShiftImm; 368 } RegShiftedReg; 369 struct { 370 ARM_AM::ShiftOpc ShiftTy; 371 unsigned SrcReg; 372 unsigned ShiftImm; 373 } RegShiftedImm; 374 struct { 375 unsigned Imm; 376 } RotImm; 377 struct { 378 unsigned LSB; 379 unsigned Width; 380 } Bitfield; 381 }; 382 383 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 384public: 385 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 386 Kind = o.Kind; 387 StartLoc = o.StartLoc; 388 EndLoc = o.EndLoc; 389 switch (Kind) { 390 case k_CondCode: 391 CC = o.CC; 392 break; 393 case k_ITCondMask: 394 ITMask = o.ITMask; 395 break; 396 case k_Token: 397 Tok = o.Tok; 398 break; 399 case k_CCOut: 400 case k_Register: 401 Reg = o.Reg; 402 break; 403 case k_RegisterList: 404 case k_DPRRegisterList: 405 case k_SPRRegisterList: 406 Registers = o.Registers; 407 break; 408 
case k_VectorList: 409 VectorList = o.VectorList; 410 break; 411 case k_CoprocNum: 412 case k_CoprocReg: 413 Cop = o.Cop; 414 break; 415 case k_CoprocOption: 416 CoprocOption = o.CoprocOption; 417 break; 418 case k_Immediate: 419 Imm = o.Imm; 420 break; 421 case k_FPImmediate: 422 FPImm = o.FPImm; 423 break; 424 case k_MemBarrierOpt: 425 MBOpt = o.MBOpt; 426 break; 427 case k_Memory: 428 Memory = o.Memory; 429 break; 430 case k_PostIndexRegister: 431 PostIdxReg = o.PostIdxReg; 432 break; 433 case k_MSRMask: 434 MMask = o.MMask; 435 break; 436 case k_ProcIFlags: 437 IFlags = o.IFlags; 438 break; 439 case k_ShifterImmediate: 440 ShifterImm = o.ShifterImm; 441 break; 442 case k_ShiftedRegister: 443 RegShiftedReg = o.RegShiftedReg; 444 break; 445 case k_ShiftedImmediate: 446 RegShiftedImm = o.RegShiftedImm; 447 break; 448 case k_RotateImmediate: 449 RotImm = o.RotImm; 450 break; 451 case k_BitfieldDescriptor: 452 Bitfield = o.Bitfield; 453 break; 454 case k_VectorIndex: 455 VectorIndex = o.VectorIndex; 456 break; 457 } 458 } 459 460 /// getStartLoc - Get the location of the first token of this operand. 461 SMLoc getStartLoc() const { return StartLoc; } 462 /// getEndLoc - Get the location of the last token of this operand. 
463 SMLoc getEndLoc() const { return EndLoc; } 464 465 ARMCC::CondCodes getCondCode() const { 466 assert(Kind == k_CondCode && "Invalid access!"); 467 return CC.Val; 468 } 469 470 unsigned getCoproc() const { 471 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 472 return Cop.Val; 473 } 474 475 StringRef getToken() const { 476 assert(Kind == k_Token && "Invalid access!"); 477 return StringRef(Tok.Data, Tok.Length); 478 } 479 480 unsigned getReg() const { 481 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 482 return Reg.RegNum; 483 } 484 485 const SmallVectorImpl<unsigned> &getRegList() const { 486 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 487 Kind == k_SPRRegisterList) && "Invalid access!"); 488 return Registers; 489 } 490 491 const MCExpr *getImm() const { 492 assert(Kind == k_Immediate && "Invalid access!"); 493 return Imm.Val; 494 } 495 496 unsigned getFPImm() const { 497 assert(Kind == k_FPImmediate && "Invalid access!"); 498 return FPImm.Val; 499 } 500 501 unsigned getVectorIndex() const { 502 assert(Kind == k_VectorIndex && "Invalid access!"); 503 return VectorIndex.Val; 504 } 505 506 ARM_MB::MemBOpt getMemBarrierOpt() const { 507 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 508 return MBOpt.Val; 509 } 510 511 ARM_PROC::IFlags getProcIFlags() const { 512 assert(Kind == k_ProcIFlags && "Invalid access!"); 513 return IFlags.Val; 514 } 515 516 unsigned getMSRMask() const { 517 assert(Kind == k_MSRMask && "Invalid access!"); 518 return MMask.Val; 519 } 520 521 bool isCoprocNum() const { return Kind == k_CoprocNum; } 522 bool isCoprocReg() const { return Kind == k_CoprocReg; } 523 bool isCoprocOption() const { return Kind == k_CoprocOption; } 524 bool isCondCode() const { return Kind == k_CondCode; } 525 bool isCCOut() const { return Kind == k_CCOut; } 526 bool isITMask() const { return Kind == k_ITCondMask; } 527 bool isITCondCode() const { return Kind == k_CondCode; } 528 bool isImm() 
const { return Kind == k_Immediate; } 529 bool isFPImm() const { return Kind == k_FPImmediate; } 530 bool isImm8s4() const { 531 if (Kind != k_Immediate) 532 return false; 533 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 534 if (!CE) return false; 535 int64_t Value = CE->getValue(); 536 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 537 } 538 bool isImm0_1020s4() const { 539 if (Kind != k_Immediate) 540 return false; 541 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 542 if (!CE) return false; 543 int64_t Value = CE->getValue(); 544 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 545 } 546 bool isImm0_508s4() const { 547 if (Kind != k_Immediate) 548 return false; 549 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 550 if (!CE) return false; 551 int64_t Value = CE->getValue(); 552 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 553 } 554 bool isImm0_255() const { 555 if (Kind != k_Immediate) 556 return false; 557 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 558 if (!CE) return false; 559 int64_t Value = CE->getValue(); 560 return Value >= 0 && Value < 256; 561 } 562 bool isImm0_7() const { 563 if (Kind != k_Immediate) 564 return false; 565 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 566 if (!CE) return false; 567 int64_t Value = CE->getValue(); 568 return Value >= 0 && Value < 8; 569 } 570 bool isImm0_15() const { 571 if (Kind != k_Immediate) 572 return false; 573 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 574 if (!CE) return false; 575 int64_t Value = CE->getValue(); 576 return Value >= 0 && Value < 16; 577 } 578 bool isImm0_31() const { 579 if (Kind != k_Immediate) 580 return false; 581 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 582 if (!CE) return false; 583 int64_t Value = CE->getValue(); 584 return Value >= 0 && Value < 32; 585 } 586 bool isImm1_16() const { 587 if (Kind != k_Immediate) 588 return false; 
589 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 590 if (!CE) return false; 591 int64_t Value = CE->getValue(); 592 return Value > 0 && Value < 17; 593 } 594 bool isImm1_32() const { 595 if (Kind != k_Immediate) 596 return false; 597 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 598 if (!CE) return false; 599 int64_t Value = CE->getValue(); 600 return Value > 0 && Value < 33; 601 } 602 bool isImm0_65535() const { 603 if (Kind != k_Immediate) 604 return false; 605 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 606 if (!CE) return false; 607 int64_t Value = CE->getValue(); 608 return Value >= 0 && Value < 65536; 609 } 610 bool isImm0_65535Expr() const { 611 if (Kind != k_Immediate) 612 return false; 613 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 614 // If it's not a constant expression, it'll generate a fixup and be 615 // handled later. 616 if (!CE) return true; 617 int64_t Value = CE->getValue(); 618 return Value >= 0 && Value < 65536; 619 } 620 bool isImm24bit() const { 621 if (Kind != k_Immediate) 622 return false; 623 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 624 if (!CE) return false; 625 int64_t Value = CE->getValue(); 626 return Value >= 0 && Value <= 0xffffff; 627 } 628 bool isImmThumbSR() const { 629 if (Kind != k_Immediate) 630 return false; 631 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 632 if (!CE) return false; 633 int64_t Value = CE->getValue(); 634 return Value > 0 && Value < 33; 635 } 636 bool isPKHLSLImm() const { 637 if (Kind != k_Immediate) 638 return false; 639 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 640 if (!CE) return false; 641 int64_t Value = CE->getValue(); 642 return Value >= 0 && Value < 32; 643 } 644 bool isPKHASRImm() const { 645 if (Kind != k_Immediate) 646 return false; 647 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 648 if (!CE) return false; 649 int64_t Value = CE->getValue(); 650 
return Value > 0 && Value <= 32; 651 } 652 bool isARMSOImm() const { 653 if (Kind != k_Immediate) 654 return false; 655 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 656 if (!CE) return false; 657 int64_t Value = CE->getValue(); 658 return ARM_AM::getSOImmVal(Value) != -1; 659 } 660 bool isT2SOImm() const { 661 if (Kind != k_Immediate) 662 return false; 663 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 664 if (!CE) return false; 665 int64_t Value = CE->getValue(); 666 return ARM_AM::getT2SOImmVal(Value) != -1; 667 } 668 bool isSetEndImm() const { 669 if (Kind != k_Immediate) 670 return false; 671 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 672 if (!CE) return false; 673 int64_t Value = CE->getValue(); 674 return Value == 1 || Value == 0; 675 } 676 bool isReg() const { return Kind == k_Register; } 677 bool isRegList() const { return Kind == k_RegisterList; } 678 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 679 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 680 bool isToken() const { return Kind == k_Token; } 681 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 682 bool isMemory() const { return Kind == k_Memory; } 683 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 684 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 685 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 686 bool isRotImm() const { return Kind == k_RotateImmediate; } 687 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 688 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 689 bool isPostIdxReg() const { 690 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift; 691 } 692 bool isMemNoOffset(bool alignOK = false) const { 693 if (!isMemory()) 694 return false; 695 // No offset of any kind. 
696 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 697 (alignOK || Memory.Alignment == 0); 698 } 699 bool isAlignedMemory() const { 700 return isMemNoOffset(true); 701 } 702 bool isAddrMode2() const { 703 if (!isMemory() || Memory.Alignment != 0) return false; 704 // Check for register offset. 705 if (Memory.OffsetRegNum) return true; 706 // Immediate offset in range [-4095, 4095]. 707 if (!Memory.OffsetImm) return true; 708 int64_t Val = Memory.OffsetImm->getValue(); 709 return Val > -4096 && Val < 4096; 710 } 711 bool isAM2OffsetImm() const { 712 if (Kind != k_Immediate) 713 return false; 714 // Immediate offset in range [-4095, 4095]. 715 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 716 if (!CE) return false; 717 int64_t Val = CE->getValue(); 718 return Val > -4096 && Val < 4096; 719 } 720 bool isAddrMode3() const { 721 if (!isMemory() || Memory.Alignment != 0) return false; 722 // No shifts are legal for AM3. 723 if (Memory.ShiftType != ARM_AM::no_shift) return false; 724 // Check for register offset. 725 if (Memory.OffsetRegNum) return true; 726 // Immediate offset in range [-255, 255]. 727 if (!Memory.OffsetImm) return true; 728 int64_t Val = Memory.OffsetImm->getValue(); 729 return Val > -256 && Val < 256; 730 } 731 bool isAM3Offset() const { 732 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 733 return false; 734 if (Kind == k_PostIndexRegister) 735 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 736 // Immediate offset in range [-255, 255]. 737 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 738 if (!CE) return false; 739 int64_t Val = CE->getValue(); 740 // Special case, #-0 is INT32_MIN. 741 return (Val > -256 && Val < 256) || Val == INT32_MIN; 742 } 743 bool isAddrMode5() const { 744 if (!isMemory() || Memory.Alignment != 0) return false; 745 // Check for register offset. 746 if (Memory.OffsetRegNum) return false; 747 // Immediate offset in range [-1020, 1020] and a multiple of 4. 
748 if (!Memory.OffsetImm) return true; 749 int64_t Val = Memory.OffsetImm->getValue(); 750 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 751 Val == INT32_MIN; 752 } 753 bool isMemTBB() const { 754 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 755 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 756 return false; 757 return true; 758 } 759 bool isMemTBH() const { 760 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 761 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 762 Memory.Alignment != 0 ) 763 return false; 764 return true; 765 } 766 bool isMemRegOffset() const { 767 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 768 return false; 769 return true; 770 } 771 bool isT2MemRegOffset() const { 772 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 773 Memory.Alignment != 0) 774 return false; 775 // Only lsl #{0, 1, 2, 3} allowed. 776 if (Memory.ShiftType == ARM_AM::no_shift) 777 return true; 778 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 779 return false; 780 return true; 781 } 782 bool isMemThumbRR() const { 783 // Thumb reg+reg addressing is simple. Just two registers, a base and 784 // an offset. No shifts, negations or any other complicating factors. 785 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 786 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 787 return false; 788 return isARMLowRegister(Memory.BaseRegNum) && 789 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 790 } 791 bool isMemThumbRIs4() const { 792 if (!isMemory() || Memory.OffsetRegNum != 0 || 793 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 794 return false; 795 // Immediate offset, multiple of 4 in range [0, 124]. 
796 if (!Memory.OffsetImm) return true; 797 int64_t Val = Memory.OffsetImm->getValue(); 798 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 799 } 800 bool isMemThumbRIs2() const { 801 if (!isMemory() || Memory.OffsetRegNum != 0 || 802 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 803 return false; 804 // Immediate offset, multiple of 4 in range [0, 62]. 805 if (!Memory.OffsetImm) return true; 806 int64_t Val = Memory.OffsetImm->getValue(); 807 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 808 } 809 bool isMemThumbRIs1() const { 810 if (!isMemory() || Memory.OffsetRegNum != 0 || 811 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 812 return false; 813 // Immediate offset in range [0, 31]. 814 if (!Memory.OffsetImm) return true; 815 int64_t Val = Memory.OffsetImm->getValue(); 816 return Val >= 0 && Val <= 31; 817 } 818 bool isMemThumbSPI() const { 819 if (!isMemory() || Memory.OffsetRegNum != 0 || 820 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 821 return false; 822 // Immediate offset, multiple of 4 in range [0, 1020]. 823 if (!Memory.OffsetImm) return true; 824 int64_t Val = Memory.OffsetImm->getValue(); 825 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 826 } 827 bool isMemImm8s4Offset() const { 828 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 829 return false; 830 // Immediate offset a multiple of 4 in range [-1020, 1020]. 831 if (!Memory.OffsetImm) return true; 832 int64_t Val = Memory.OffsetImm->getValue(); 833 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 834 } 835 bool isMemImm0_1020s4Offset() const { 836 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 837 return false; 838 // Immediate offset a multiple of 4 in range [0, 1020]. 
839 if (!Memory.OffsetImm) return true; 840 int64_t Val = Memory.OffsetImm->getValue(); 841 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 842 } 843 bool isMemImm8Offset() const { 844 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 845 return false; 846 // Immediate offset in range [-255, 255]. 847 if (!Memory.OffsetImm) return true; 848 int64_t Val = Memory.OffsetImm->getValue(); 849 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 850 } 851 bool isMemPosImm8Offset() const { 852 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 853 return false; 854 // Immediate offset in range [0, 255]. 855 if (!Memory.OffsetImm) return true; 856 int64_t Val = Memory.OffsetImm->getValue(); 857 return Val >= 0 && Val < 256; 858 } 859 bool isMemNegImm8Offset() const { 860 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 861 return false; 862 // Immediate offset in range [-255, -1]. 863 if (!Memory.OffsetImm) return true; 864 int64_t Val = Memory.OffsetImm->getValue(); 865 return Val > -256 && Val < 0; 866 } 867 bool isMemUImm12Offset() const { 868 // If we have an immediate that's not a constant, treat it as a label 869 // reference needing a fixup. If it is a constant, it's something else 870 // and we reject it. 871 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 872 return true; 873 874 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 875 return false; 876 // Immediate offset in range [0, 4095]. 877 if (!Memory.OffsetImm) return true; 878 int64_t Val = Memory.OffsetImm->getValue(); 879 return (Val >= 0 && Val < 4096); 880 } 881 bool isMemImm12Offset() const { 882 // If we have an immediate that's not a constant, treat it as a label 883 // reference needing a fixup. If it is a constant, it's something else 884 // and we reject it. 
885 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 886 return true; 887 888 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 889 return false; 890 // Immediate offset in range [-4095, 4095]. 891 if (!Memory.OffsetImm) return true; 892 int64_t Val = Memory.OffsetImm->getValue(); 893 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 894 } 895 bool isPostIdxImm8() const { 896 if (Kind != k_Immediate) 897 return false; 898 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 899 if (!CE) return false; 900 int64_t Val = CE->getValue(); 901 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 902 } 903 bool isPostIdxImm8s4() const { 904 if (Kind != k_Immediate) 905 return false; 906 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 907 if (!CE) return false; 908 int64_t Val = CE->getValue(); 909 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 910 (Val == INT32_MIN); 911 } 912 913 bool isMSRMask() const { return Kind == k_MSRMask; } 914 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 915 916 // NEON operands. 917 bool isVecListOneD() const { 918 if (Kind != k_VectorList) return false; 919 return VectorList.Count == 1; 920 } 921 922 bool isVecListTwoD() const { 923 if (Kind != k_VectorList) return false; 924 return VectorList.Count == 2; 925 } 926 927 bool isVecListThreeD() const { 928 if (Kind != k_VectorList) return false; 929 return VectorList.Count == 3; 930 } 931 932 bool isVecListFourD() const { 933 if (Kind != k_VectorList) return false; 934 return VectorList.Count == 4; 935 } 936 937 bool isVecListTwoQ() const { 938 if (Kind != k_VectorList) return false; 939 //FIXME: We haven't taught the parser to handle by-two register lists 940 // yet, so don't pretend to know one. 
941 return VectorList.Count == 2 && false; 942 } 943 944 bool isVectorIndex8() const { 945 if (Kind != k_VectorIndex) return false; 946 return VectorIndex.Val < 8; 947 } 948 bool isVectorIndex16() const { 949 if (Kind != k_VectorIndex) return false; 950 return VectorIndex.Val < 4; 951 } 952 bool isVectorIndex32() const { 953 if (Kind != k_VectorIndex) return false; 954 return VectorIndex.Val < 2; 955 } 956 957 bool isNEONi8splat() const { 958 if (Kind != k_Immediate) 959 return false; 960 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 961 // Must be a constant. 962 if (!CE) return false; 963 int64_t Value = CE->getValue(); 964 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 965 // value. 966 return Value >= 0 && Value < 256; 967 } 968 969 bool isNEONi16splat() const { 970 if (Kind != k_Immediate) 971 return false; 972 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 973 // Must be a constant. 974 if (!CE) return false; 975 int64_t Value = CE->getValue(); 976 // i16 value in the range [0,255] or [0x0100, 0xff00] 977 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 978 } 979 980 bool isNEONi32splat() const { 981 if (Kind != k_Immediate) 982 return false; 983 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 984 // Must be a constant. 985 if (!CE) return false; 986 int64_t Value = CE->getValue(); 987 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 988 return (Value >= 0 && Value < 256) || 989 (Value >= 0x0100 && Value <= 0xff00) || 990 (Value >= 0x010000 && Value <= 0xff0000) || 991 (Value >= 0x01000000 && Value <= 0xff000000); 992 } 993 994 bool isNEONi32vmov() const { 995 if (Kind != k_Immediate) 996 return false; 997 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 998 // Must be a constant. 
999 if (!CE) return false; 1000 int64_t Value = CE->getValue(); 1001 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 1002 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 1003 return (Value >= 0 && Value < 256) || 1004 (Value >= 0x0100 && Value <= 0xff00) || 1005 (Value >= 0x010000 && Value <= 0xff0000) || 1006 (Value >= 0x01000000 && Value <= 0xff000000) || 1007 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1008 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1009 } 1010 1011 bool isNEONi64splat() const { 1012 if (Kind != k_Immediate) 1013 return false; 1014 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1015 // Must be a constant. 1016 if (!CE) return false; 1017 uint64_t Value = CE->getValue(); 1018 // i64 value with each byte being either 0 or 0xff. 1019 for (unsigned i = 0; i < 8; ++i) 1020 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1021 return true; 1022 } 1023 1024 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1025 // Add as immediates when possible. Null MCExpr = 0. 1026 if (Expr == 0) 1027 Inst.addOperand(MCOperand::CreateImm(0)); 1028 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1029 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1030 else 1031 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1032 } 1033 1034 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1035 assert(N == 2 && "Invalid number of operands!"); 1036 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1037 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR; 1038 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1039 } 1040 1041 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1042 assert(N == 1 && "Invalid number of operands!"); 1043 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1044 } 1045 1046 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1047 assert(N == 1 && "Invalid number of operands!"); 1048 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1049 } 1050 1051 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1052 assert(N == 1 && "Invalid number of operands!"); 1053 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1054 } 1055 1056 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1057 assert(N == 1 && "Invalid number of operands!"); 1058 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1059 } 1060 1061 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1062 assert(N == 1 && "Invalid number of operands!"); 1063 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1064 } 1065 1066 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1067 assert(N == 1 && "Invalid number of operands!"); 1068 Inst.addOperand(MCOperand::CreateReg(getReg())); 1069 } 1070 1071 void addRegOperands(MCInst &Inst, unsigned N) const { 1072 assert(N == 1 && "Invalid number of operands!"); 1073 Inst.addOperand(MCOperand::CreateReg(getReg())); 1074 } 1075 1076 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1077 assert(N == 3 && "Invalid number of operands!"); 1078 assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!"); 1079 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1080 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1081 Inst.addOperand(MCOperand::CreateImm( 1082 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1083 } 1084 1085 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1086 assert(N == 2 && "Invalid number of operands!"); 1087 
assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!"); 1088 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1089 Inst.addOperand(MCOperand::CreateImm( 1090 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1091 } 1092 1093 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1094 assert(N == 1 && "Invalid number of operands!"); 1095 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1096 ShifterImm.Imm)); 1097 } 1098 1099 void addRegListOperands(MCInst &Inst, unsigned N) const { 1100 assert(N == 1 && "Invalid number of operands!"); 1101 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1102 for (SmallVectorImpl<unsigned>::const_iterator 1103 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1104 Inst.addOperand(MCOperand::CreateReg(*I)); 1105 } 1106 1107 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1108 addRegListOperands(Inst, N); 1109 } 1110 1111 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1112 addRegListOperands(Inst, N); 1113 } 1114 1115 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1116 assert(N == 1 && "Invalid number of operands!"); 1117 // Encoded as val>>3. The printer handles display as 8, 16, 24. 1118 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1119 } 1120 1121 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1122 assert(N == 1 && "Invalid number of operands!"); 1123 // Munge the lsb/width into a bitfield mask. 1124 unsigned lsb = Bitfield.LSB; 1125 unsigned width = Bitfield.Width; 1126 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 
    uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >>
                      (32 - (lsb + width)));
    Inst.addOperand(MCOperand::CreateImm(Mask));
  }

  void addImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addFPImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getFPImm()));
  }

  void addImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // FIXME: We really want to scale the value here, but the LDRD/STRD
    // instruction don't encode operands that way yet.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue()));
  }

  void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  void addImm0_508s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate is scaled by four in the encoding and is stored
    // in the MCInst as such. Lop off the low two bits here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4));
  }

  // The following range-restricted immediates are stored as-is; range
  // checking was done by the matching is*() predicate.
  void addImm0_255Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_7Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_15Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_31Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm1_16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm1_32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate-1, and we store in the instruction
    // the bits as encoded, so subtract off one here.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1));
  }

  void addImm0_65535Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImm24bitOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addImmThumbSROperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The constant encodes as the immediate, except for 32, which encodes as
    // zero.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Imm = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm)));
  }

  void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addPKHASRImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // An ASR value of 32 encodes as 0, so that's how we want to add it to
    // the instruction as well.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    int Val = CE->getValue();
    Inst.addOperand(MCOperand::CreateImm(Val == 32 ?
                                         0 : Val));
  }

  void addARMSOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addT2SOImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addSetEndImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    addExpr(Inst, getImm());
  }

  void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt())));
  }

  void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
  }

  void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.Alignment));
  }

  // Addressing mode 2: base reg, offset reg, and a packed
  // add/sub + shift + immediate word built by getAM2Opc().
  void addAddrMode2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                              Memory.ShiftImm, Memory.ShiftType);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant AM2OffsetImm operand!");
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Addressing mode 3: same shape as mode 2, but no shifted offsets.
  void addAddrMode3Operands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    if (!Memory.OffsetRegNum) {
      ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
      // Special case for #-0
      if (Val == INT32_MIN) Val = 0;
      if (Val < 0) Val = -Val;
      Val = ARM_AM::getAM3Opc(AddSub, Val);
    } else {
      // For register offset, we encode the shift type and negation flag
      // here.
      Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0);
    }
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed AM3 offset: either a register (k_PostIndexRegister) or a
  // constant immediate; both emit a reg operand plus a packed AM3 word.
  void addAM3OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    if (Kind == k_PostIndexRegister) {
      int32_t Val =
        ARM_AM::getAM3Opc(PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub, 0);
      Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
      Inst.addOperand(MCOperand::CreateImm(Val));
      return;
    }

    // Constant offset.
    const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm());
    int32_t Val = CE->getValue();
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM3Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(0));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addAddrMode5Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0;
    ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add;
    // Special case for #-0
    if (Val == INT32_MIN) Val = 0;
    if (Val < 0) Val = -Val;
    Val = ARM_AM::getAM5Opc(AddSub, Val);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // The lower two bits are always zero and as such are not encoded.
    int32_t Val = Memory.OffsetImm ?
                    Memory.OffsetImm->getValue() / 4 : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Positive/negative imm8 offsets share the plain imm8 emission; the
  // sign distinction matters only for matching, not encoding here.
  void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const {
    addMemImm8OffsetOperands(Inst, N);
  }

  void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    // If this is an immediate, it's a label reference.
    if (Kind == k_Immediate) {
      addExpr(Inst, getImm());
      Inst.addOperand(MCOperand::CreateImm(0));
      return;
    }

    // Otherwise, it's a normal memory reg+offset.
    int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // TBB/TBH table-branch operands: just base and index registers.
  void addMemTBBOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemTBHOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add,
                                     Memory.ShiftImm, Memory.ShiftType);
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const {
    assert(N == 3 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
    Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm));
  }

  void addMemThumbRROperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum));
  }

  // Thumb reg+imm forms: offset stored pre-scaled by the access size
  // (4, 2, or 1 bytes).
  void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0;
    Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum));
    Inst.addOperand(MCOperand::CreateImm(Val));
  }

  // Post-indexed imm8: magnitude in the low bits, add/sub flag in bit 8.
  void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    assert(CE && "non-constant post-idx-imm8s4 operand!");
    int Imm = CE->getValue();
    bool isAdd = Imm >= 0;
    if (Imm == INT32_MIN) Imm = 0;
    // Immediate is scaled by 4.
    Imm = ((Imm < 0 ?
            -Imm : Imm) / 4) | (int)isAdd << 8;
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addPostIdxRegOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd));
  }

  void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const {
    assert(N == 2 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum));
    // The sign, shift type, and shift amount are encoded in a single operand
    // using the AM2 encoding helpers.
    ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub;
    unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm,
                                     PostIdxReg.ShiftTy);
    Inst.addOperand(MCOperand::CreateImm(Imm));
  }

  void addMSRMaskOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask())));
  }

  void addProcIFlagsOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags())));
  }

  void addVecListOneDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListThreeDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListFourDOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVecListTwoQOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // Only the first register actually goes on the instruction. The rest
    // are implied by the opcode.
    Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum));
  }

  void addVectorIndex8Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex16Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addVectorIndex32Operands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    Inst.addOperand(MCOperand::CreateImm(getVectorIndex()));
  }

  void addNEONi8splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    // Mask in that this is an i8 splat.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00));
  }

  void addNEONi16splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // Values above 255 live in the high byte; select the matching cmode.
    if (Value >= 256)
      Value = (Value >> 8) | 0xa00;
    else
      Value |= 0x800;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // Shift the set byte down and tag which of the four byte positions
    // it occupied.
    if (Value >= 256 && Value <= 0xff00)
      Value = (Value >> 8) | 0x200;
    else if (Value > 0xffff && Value <= 0xff0000)
      Value = (Value >> 16) | 0x400;
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    unsigned Value = CE->getValue();
    // The 00Xf/0Xff "ones-filled" VMOV forms get their own cmode tags.
    if (Value >= 256 && Value <= 0xffff)
      Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200);
    else if (Value > 0xffff && Value <= 0xffffff)
      Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400);
    else if (Value > 0xffffff)
      Value = (Value >> 24) | 0x600;
    Inst.addOperand(MCOperand::CreateImm(Value));
  }

  void addNEONi64splatOperands(MCInst &Inst, unsigned N) const {
    assert(N == 1 && "Invalid number of operands!");
    // The immediate encodes the type of constant as well as the value.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm());
    uint64_t Value = CE->getValue();
    unsigned Imm = 0;
    // Collapse each all-zero/all-one byte to a single bit of the i64
    // splat immediate (bit i set iff byte i is 0xff).
    for (unsigned i = 0; i < 8; ++i, Value >>= 8) {
      Imm |= (Value & 1) << i;
    }
    Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00));
  }

  virtual void print(raw_ostream &OS) const;

  // Static factories: each allocates an ARMOperand of the matching kind
  // and records its source range for diagnostics.
  static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ITCondMask);
    Op->ITMask.Mask = Mask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CondCode);
    Op->CC.Val = CC;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocNum);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CoprocReg);
    Op->Cop.Val = CopVal;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_CoprocOption);
    Op->Cop.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_CCOut);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateToken(StringRef Str, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_Token);
    Op->Tok.Data = Str.data();
    Op->Tok.Length = Str.size();
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  // Classify the list by its first register (DPR/SPR/GPR) and store the
  // members sorted by register number.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.
1879 1880void ARMOperand::print(raw_ostream &OS) const { 1881 switch (Kind) { 1882 case k_FPImmediate: 1883 OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm()) 1884 << ") >"; 1885 break; 1886 case k_CondCode: 1887 OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">"; 1888 break; 1889 case k_CCOut: 1890 OS << "<ccout " << getReg() << ">"; 1891 break; 1892 case k_ITCondMask: { 1893 static const char *MaskStr[] = { 1894 "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)", 1895 "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)" 1896 }; 1897 assert((ITMask.Mask & 0xf) == ITMask.Mask); 1898 OS << "<it-mask " << MaskStr[ITMask.Mask] << ">"; 1899 break; 1900 } 1901 case k_CoprocNum: 1902 OS << "<coprocessor number: " << getCoproc() << ">"; 1903 break; 1904 case k_CoprocReg: 1905 OS << "<coprocessor register: " << getCoproc() << ">"; 1906 break; 1907 case k_CoprocOption: 1908 OS << "<coprocessor option: " << CoprocOption.Val << ">"; 1909 break; 1910 case k_MSRMask: 1911 OS << "<mask: " << getMSRMask() << ">"; 1912 break; 1913 case k_Immediate: 1914 getImm()->print(OS); 1915 break; 1916 case k_MemBarrierOpt: 1917 OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">"; 1918 break; 1919 case k_Memory: 1920 OS << "<memory " 1921 << " base:" << Memory.BaseRegNum; 1922 OS << ">"; 1923 break; 1924 case k_PostIndexRegister: 1925 OS << "post-idx register " << (PostIdxReg.isAdd ? 
"" : "-") 1926 << PostIdxReg.RegNum; 1927 if (PostIdxReg.ShiftTy != ARM_AM::no_shift) 1928 OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " " 1929 << PostIdxReg.ShiftImm; 1930 OS << ">"; 1931 break; 1932 case k_ProcIFlags: { 1933 OS << "<ARM_PROC::"; 1934 unsigned IFlags = getProcIFlags(); 1935 for (int i=2; i >= 0; --i) 1936 if (IFlags & (1 << i)) 1937 OS << ARM_PROC::IFlagsToString(1 << i); 1938 OS << ">"; 1939 break; 1940 } 1941 case k_Register: 1942 OS << "<register " << getReg() << ">"; 1943 break; 1944 case k_ShifterImmediate: 1945 OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl") 1946 << " #" << ShifterImm.Imm << ">"; 1947 break; 1948 case k_ShiftedRegister: 1949 OS << "<so_reg_reg " 1950 << RegShiftedReg.SrcReg 1951 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm)) 1952 << ", " << RegShiftedReg.ShiftReg << ", " 1953 << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm) 1954 << ">"; 1955 break; 1956 case k_ShiftedImmediate: 1957 OS << "<so_reg_imm " 1958 << RegShiftedImm.SrcReg 1959 << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm)) 1960 << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm) 1961 << ">"; 1962 break; 1963 case k_RotateImmediate: 1964 OS << "<ror " << " #" << (RotImm.Imm * 8) << ">"; 1965 break; 1966 case k_BitfieldDescriptor: 1967 OS << "<bitfield " << "lsb: " << Bitfield.LSB 1968 << ", width: " << Bitfield.Width << ">"; 1969 break; 1970 case k_RegisterList: 1971 case k_DPRRegisterList: 1972 case k_SPRRegisterList: { 1973 OS << "<register_list "; 1974 1975 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1976 for (SmallVectorImpl<unsigned>::const_iterator 1977 I = RegList.begin(), E = RegList.end(); I != E; ) { 1978 OS << *I; 1979 if (++I < E) OS << ", "; 1980 } 1981 1982 OS << ">"; 1983 break; 1984 } 1985 case k_VectorList: 1986 OS << "<vector_list " << VectorList.Count << " * " 1987 << VectorList.RegNum << ">"; 1988 break; 1989 case k_Token: 1990 OS << "'" << getToken() << "'"; 1991 
break; 1992 case k_VectorIndex: 1993 OS << "<vectorindex " << getVectorIndex() << ">"; 1994 break; 1995 } 1996} 1997 1998/// @name Auto-generated Match Functions 1999/// { 2000 2001static unsigned MatchRegisterName(StringRef Name); 2002 2003/// } 2004 2005bool ARMAsmParser::ParseRegister(unsigned &RegNo, 2006 SMLoc &StartLoc, SMLoc &EndLoc) { 2007 RegNo = tryParseRegister(); 2008 2009 return (RegNo == (unsigned)-1); 2010} 2011 2012/// Try to parse a register name. The token must be an Identifier when called, 2013/// and if it is a register name the token is eaten and the register number is 2014/// returned. Otherwise return -1. 2015/// 2016int ARMAsmParser::tryParseRegister() { 2017 const AsmToken &Tok = Parser.getTok(); 2018 if (Tok.isNot(AsmToken::Identifier)) return -1; 2019 2020 // FIXME: Validate register for the current architecture; we have to do 2021 // validation later, so maybe there is no need for this here. 2022 std::string upperCase = Tok.getString().str(); 2023 std::string lowerCase = LowercaseString(upperCase); 2024 unsigned RegNum = MatchRegisterName(lowerCase); 2025 if (!RegNum) { 2026 RegNum = StringSwitch<unsigned>(lowerCase) 2027 .Case("r13", ARM::SP) 2028 .Case("r14", ARM::LR) 2029 .Case("r15", ARM::PC) 2030 .Case("ip", ARM::R12) 2031 .Default(0); 2032 } 2033 if (!RegNum) return -1; 2034 2035 Parser.Lex(); // Eat identifier token. 2036 2037 return RegNum; 2038} 2039 2040// Try to parse a shifter (e.g., "lsl <amt>"). On success, return 0. 2041// If a recoverable error occurs, return 1. If an irrecoverable error 2042// occurs, return -1. An irrecoverable error is one where tokens have been 2043// consumed in the process of trying to parse the shifter (i.e., when it is 2044// indeed a shifter operand, but malformed). 
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  // Shift mnemonics are matched case-insensitively.
  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift operator at all: recoverable, no tokens consumed.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this path returns 1
    // ("recoverable") even though the shift-operator token has already been
    // consumed and the previous operand popped -- by the contract above,
    // -1 looks intended. Confirm against callers before changing.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      // Register-specified shift amount, e.g. "lsl r3".
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // Fold the source register and the shift into a single combined operand.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name.  The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2138bool ARMAsmParser:: 2139tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2140 SMLoc S = Parser.getTok().getLoc(); 2141 int RegNo = tryParseRegister(); 2142 if (RegNo == -1) 2143 return true; 2144 2145 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2146 2147 const AsmToken &ExclaimTok = Parser.getTok(); 2148 if (ExclaimTok.is(AsmToken::Exclaim)) { 2149 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2150 ExclaimTok.getLoc())); 2151 Parser.Lex(); // Eat exclaim token 2152 return false; 2153 } 2154 2155 // Also check for an index operand. This is only legal for vector registers, 2156 // but that'll get caught OK in operand matching, so we don't need to 2157 // explicitly filter everything else out here. 2158 if (Parser.getTok().is(AsmToken::LBrac)) { 2159 SMLoc SIdx = Parser.getTok().getLoc(); 2160 Parser.Lex(); // Eat left bracket token. 2161 2162 const MCExpr *ImmVal; 2163 if (getParser().ParseExpression(ImmVal)) 2164 return MatchOperand_ParseFail; 2165 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2166 if (!MCE) { 2167 TokError("immediate value expected for vector index"); 2168 return MatchOperand_ParseFail; 2169 } 2170 2171 SMLoc E = Parser.getTok().getLoc(); 2172 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2173 Error(E, "']' expected"); 2174 return MatchOperand_ParseFail; 2175 } 2176 2177 Parser.Lex(); // Eat right bracket token. 2178 2179 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2180 SIdx, E, 2181 getContext())); 2182 } 2183 2184 return false; 2185} 2186 2187/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2188/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2189/// "c5", ... 2190static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2191 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2192 // but efficient. 
2193 switch (Name.size()) { 2194 default: break; 2195 case 2: 2196 if (Name[0] != CoprocOp) 2197 return -1; 2198 switch (Name[1]) { 2199 default: return -1; 2200 case '0': return 0; 2201 case '1': return 1; 2202 case '2': return 2; 2203 case '3': return 3; 2204 case '4': return 4; 2205 case '5': return 5; 2206 case '6': return 6; 2207 case '7': return 7; 2208 case '8': return 8; 2209 case '9': return 9; 2210 } 2211 break; 2212 case 3: 2213 if (Name[0] != CoprocOp || Name[1] != '1') 2214 return -1; 2215 switch (Name[2]) { 2216 default: return -1; 2217 case '0': return 10; 2218 case '1': return 11; 2219 case '2': return 12; 2220 case '3': return 13; 2221 case '4': return 14; 2222 case '5': return 15; 2223 } 2224 break; 2225 } 2226 2227 return -1; 2228} 2229 2230/// parseITCondCode - Try to parse a condition code for an IT instruction. 2231ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2232parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2233 SMLoc S = Parser.getTok().getLoc(); 2234 const AsmToken &Tok = Parser.getTok(); 2235 if (!Tok.is(AsmToken::Identifier)) 2236 return MatchOperand_NoMatch; 2237 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2238 .Case("eq", ARMCC::EQ) 2239 .Case("ne", ARMCC::NE) 2240 .Case("hs", ARMCC::HS) 2241 .Case("cs", ARMCC::HS) 2242 .Case("lo", ARMCC::LO) 2243 .Case("cc", ARMCC::LO) 2244 .Case("mi", ARMCC::MI) 2245 .Case("pl", ARMCC::PL) 2246 .Case("vs", ARMCC::VS) 2247 .Case("vc", ARMCC::VC) 2248 .Case("hi", ARMCC::HI) 2249 .Case("ls", ARMCC::LS) 2250 .Case("ge", ARMCC::GE) 2251 .Case("lt", ARMCC::LT) 2252 .Case("gt", ARMCC::GT) 2253 .Case("le", ARMCC::LE) 2254 .Case("al", ARMCC::AL) 2255 .Default(~0U); 2256 if (CC == ~0U) 2257 return MatchOperand_NoMatch; 2258 Parser.Lex(); // Eat the token. 2259 2260 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2261 2262 return MatchOperand_Success; 2263} 2264 2265/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
The 2266/// token must be an Identifier when called, and if it is a coprocessor 2267/// number, the token is eaten and the operand is added to the operand list. 2268ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2269parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2270 SMLoc S = Parser.getTok().getLoc(); 2271 const AsmToken &Tok = Parser.getTok(); 2272 if (Tok.isNot(AsmToken::Identifier)) 2273 return MatchOperand_NoMatch; 2274 2275 int Num = MatchCoprocessorOperandName(Tok.getString(), 'p'); 2276 if (Num == -1) 2277 return MatchOperand_NoMatch; 2278 2279 Parser.Lex(); // Eat identifier token. 2280 Operands.push_back(ARMOperand::CreateCoprocNum(Num, S)); 2281 return MatchOperand_Success; 2282} 2283 2284/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The 2285/// token must be an Identifier when called, and if it is a coprocessor 2286/// number, the token is eaten and the operand is added to the operand list. 2287ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2288parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2289 SMLoc S = Parser.getTok().getLoc(); 2290 const AsmToken &Tok = Parser.getTok(); 2291 if (Tok.isNot(AsmToken::Identifier)) 2292 return MatchOperand_NoMatch; 2293 2294 int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c'); 2295 if (Reg == -1) 2296 return MatchOperand_NoMatch; 2297 2298 Parser.Lex(); // Eat identifier token. 2299 Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S)); 2300 return MatchOperand_Success; 2301} 2302 2303/// parseCoprocOptionOperand - Try to parse an coprocessor option operand. 2304/// coproc_option : '{' imm0_255 '}' 2305ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2306parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2307 SMLoc S = Parser.getTok().getLoc(); 2308 2309 // If this isn't a '{', this isn't a coprocessor immediate operand. 
2310 if (Parser.getTok().isNot(AsmToken::LCurly)) 2311 return MatchOperand_NoMatch; 2312 Parser.Lex(); // Eat the '{' 2313 2314 const MCExpr *Expr; 2315 SMLoc Loc = Parser.getTok().getLoc(); 2316 if (getParser().ParseExpression(Expr)) { 2317 Error(Loc, "illegal expression"); 2318 return MatchOperand_ParseFail; 2319 } 2320 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 2321 if (!CE || CE->getValue() < 0 || CE->getValue() > 255) { 2322 Error(Loc, "coprocessor option must be an immediate in range [0, 255]"); 2323 return MatchOperand_ParseFail; 2324 } 2325 int Val = CE->getValue(); 2326 2327 // Check for and consume the closing '}' 2328 if (Parser.getTok().isNot(AsmToken::RCurly)) 2329 return MatchOperand_ParseFail; 2330 SMLoc E = Parser.getTok().getLoc(); 2331 Parser.Lex(); // Eat the '}' 2332 2333 Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E)); 2334 return MatchOperand_Success; 2335} 2336 2337// For register list parsing, we need to map from raw GPR register numbering 2338// to the enumeration values. The enumeration values aren't sorted by 2339// register number due to our using "sp", "lr" and "pc" as canonical names. 2340static unsigned getNextRegister(unsigned Reg) { 2341 // If this is a GPR, we need to do it manually, otherwise we can rely 2342 // on the sort ordering of the enumeration since the other reg-classes 2343 // are sane. 
if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  // GPR successor table; wraps PC -> R0.
  switch(Reg) {
  default: assert(0 && "Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

/// Parse a register list.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
  // Store the first register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // VFP register lists must also be contiguous.
// It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
  return false;
}

// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if(Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg = Reg;
  unsigned Count = 1;
  while (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must also be contiguous.
    // It's OK to use the enumeration values directly here, as the
    // VFP register classes have the enum sorted properly.
    if (Reg != OldReg + 1) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }

    ++Count;
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef OptStr = Tok.getString();

  // Several spellings alias to the same option ("sh"/"ish", "un"/"nsh", ...).
  // NOTE(review): OptStr.slice(0, OptStr.size()) is a no-op -- equivalent to
  // just OptStr.
  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
    .Case("sy",    ARM_MB::SY)
    .Case("st",    ARM_MB::ST)
    .Case("sh",    ARM_MB::ISH)
    .Case("ish",   ARM_MB::ISH)
    .Case("shst",  ARM_MB::ISHST)
    .Case("ishst", ARM_MB::ISHST)
    .Case("nsh",   ARM_MB::NSH)
    .Case("un",    ARM_MB::NSH)
    .Case("nshst", ARM_MB::NSHST)
    .Case("unst",  ARM_MB::NSHST)
    .Case("osh",   ARM_MB::OSH)
    .Case("oshst", ARM_MB::OSHST)
    .Default(~0U);

  if (Opt == ~0U)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
  return MatchOperand_Success;
}

/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
2530ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2531parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2532 SMLoc S = Parser.getTok().getLoc(); 2533 const AsmToken &Tok = Parser.getTok(); 2534 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2535 StringRef IFlagsStr = Tok.getString(); 2536 2537 // An iflags string of "none" is interpreted to mean that none of the AIF 2538 // bits are set. Not a terribly useful instruction, but a valid encoding. 2539 unsigned IFlags = 0; 2540 if (IFlagsStr != "none") { 2541 for (int i = 0, e = IFlagsStr.size(); i != e; ++i) { 2542 unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1)) 2543 .Case("a", ARM_PROC::A) 2544 .Case("i", ARM_PROC::I) 2545 .Case("f", ARM_PROC::F) 2546 .Default(~0U); 2547 2548 // If some specific iflag is already set, it means that some letter is 2549 // present more than once, this is not acceptable. 2550 if (Flag == ~0U || (IFlags & Flag)) 2551 return MatchOperand_NoMatch; 2552 2553 IFlags |= Flag; 2554 } 2555 } 2556 2557 Parser.Lex(); // Eat identifier token. 2558 Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S)); 2559 return MatchOperand_Success; 2560} 2561 2562/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction. 
2563ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2564parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2565 SMLoc S = Parser.getTok().getLoc(); 2566 const AsmToken &Tok = Parser.getTok(); 2567 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2568 StringRef Mask = Tok.getString(); 2569 2570 if (isMClass()) { 2571 // See ARMv6-M 10.1.1 2572 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2573 .Case("apsr", 0) 2574 .Case("iapsr", 1) 2575 .Case("eapsr", 2) 2576 .Case("xpsr", 3) 2577 .Case("ipsr", 5) 2578 .Case("epsr", 6) 2579 .Case("iepsr", 7) 2580 .Case("msp", 8) 2581 .Case("psp", 9) 2582 .Case("primask", 16) 2583 .Case("basepri", 17) 2584 .Case("basepri_max", 18) 2585 .Case("faultmask", 19) 2586 .Case("control", 20) 2587 .Default(~0U); 2588 2589 if (FlagsVal == ~0U) 2590 return MatchOperand_NoMatch; 2591 2592 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2593 // basepri, basepri_max and faultmask only valid for V7m. 2594 return MatchOperand_NoMatch; 2595 2596 Parser.Lex(); // Eat identifier token. 
2597 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2598 return MatchOperand_Success; 2599 } 2600 2601 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2602 size_t Start = 0, Next = Mask.find('_'); 2603 StringRef Flags = ""; 2604 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2605 if (Next != StringRef::npos) 2606 Flags = Mask.slice(Next+1, Mask.size()); 2607 2608 // FlagsVal contains the complete mask: 2609 // 3-0: Mask 2610 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2611 unsigned FlagsVal = 0; 2612 2613 if (SpecReg == "apsr") { 2614 FlagsVal = StringSwitch<unsigned>(Flags) 2615 .Case("nzcvq", 0x8) // same as CPSR_f 2616 .Case("g", 0x4) // same as CPSR_s 2617 .Case("nzcvqg", 0xc) // same as CPSR_fs 2618 .Default(~0U); 2619 2620 if (FlagsVal == ~0U) { 2621 if (!Flags.empty()) 2622 return MatchOperand_NoMatch; 2623 else 2624 FlagsVal = 8; // No flag 2625 } 2626 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2627 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2628 Flags = "fc"; 2629 for (int i = 0, e = Flags.size(); i != e; ++i) { 2630 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2631 .Case("c", 1) 2632 .Case("x", 2) 2633 .Case("s", 4) 2634 .Case("f", 8) 2635 .Default(~0U); 2636 2637 // If some specific flag is already set, it means that some letter is 2638 // present more than once, this is not acceptable. 2639 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2640 return MatchOperand_NoMatch; 2641 FlagsVal |= Flag; 2642 } 2643 } else // No match for special register. 2644 return MatchOperand_NoMatch; 2645 2646 // Special register without flags is NOT equivalent to "fc" flags. 2647 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2648 // two lines would enable gas compatibility at the expense of breaking 2649 // round-tripping. 
2650 // 2651 // if (!FlagsVal) 2652 // FlagsVal = 0x9; 2653 2654 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2655 if (SpecReg == "spsr") 2656 FlagsVal |= 16; 2657 2658 Parser.Lex(); // Eat identifier token. 2659 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2660 return MatchOperand_Success; 2661} 2662 2663ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2664parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2665 int Low, int High) { 2666 const AsmToken &Tok = Parser.getTok(); 2667 if (Tok.isNot(AsmToken::Identifier)) { 2668 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2669 return MatchOperand_ParseFail; 2670 } 2671 StringRef ShiftName = Tok.getString(); 2672 std::string LowerOp = LowercaseString(Op); 2673 std::string UpperOp = UppercaseString(Op); 2674 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2675 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2676 return MatchOperand_ParseFail; 2677 } 2678 Parser.Lex(); // Eat shift type token. 2679 2680 // There must be a '#' and a shift amount. 2681 if (Parser.getTok().isNot(AsmToken::Hash)) { 2682 Error(Parser.getTok().getLoc(), "'#' expected"); 2683 return MatchOperand_ParseFail; 2684 } 2685 Parser.Lex(); // Eat hash token. 
2686 2687 const MCExpr *ShiftAmount; 2688 SMLoc Loc = Parser.getTok().getLoc(); 2689 if (getParser().ParseExpression(ShiftAmount)) { 2690 Error(Loc, "illegal expression"); 2691 return MatchOperand_ParseFail; 2692 } 2693 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2694 if (!CE) { 2695 Error(Loc, "constant expression expected"); 2696 return MatchOperand_ParseFail; 2697 } 2698 int Val = CE->getValue(); 2699 if (Val < Low || Val > High) { 2700 Error(Loc, "immediate value out of range"); 2701 return MatchOperand_ParseFail; 2702 } 2703 2704 Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc())); 2705 2706 return MatchOperand_Success; 2707} 2708 2709ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2710parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2711 const AsmToken &Tok = Parser.getTok(); 2712 SMLoc S = Tok.getLoc(); 2713 if (Tok.isNot(AsmToken::Identifier)) { 2714 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2715 return MatchOperand_ParseFail; 2716 } 2717 int Val = StringSwitch<int>(Tok.getString()) 2718 .Case("be", 1) 2719 .Case("le", 0) 2720 .Default(-1); 2721 Parser.Lex(); // Eat the token. 2722 2723 if (Val == -1) { 2724 Error(Tok.getLoc(), "'be' or 'le' operand expected"); 2725 return MatchOperand_ParseFail; 2726 } 2727 Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val, 2728 getContext()), 2729 S, Parser.getTok().getLoc())); 2730 return MatchOperand_Success; 2731} 2732 2733/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT 2734/// instructions. Legal values are: 2735/// lsl #n 'n' in [0,31] 2736/// asr #n 'n' in [1,32] 2737/// n == 32 encoded as n == 0. 
2738ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2739parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2740 const AsmToken &Tok = Parser.getTok(); 2741 SMLoc S = Tok.getLoc(); 2742 if (Tok.isNot(AsmToken::Identifier)) { 2743 Error(S, "shift operator 'asr' or 'lsl' expected"); 2744 return MatchOperand_ParseFail; 2745 } 2746 StringRef ShiftName = Tok.getString(); 2747 bool isASR; 2748 if (ShiftName == "lsl" || ShiftName == "LSL") 2749 isASR = false; 2750 else if (ShiftName == "asr" || ShiftName == "ASR") 2751 isASR = true; 2752 else { 2753 Error(S, "shift operator 'asr' or 'lsl' expected"); 2754 return MatchOperand_ParseFail; 2755 } 2756 Parser.Lex(); // Eat the operator. 2757 2758 // A '#' and a shift amount. 2759 if (Parser.getTok().isNot(AsmToken::Hash)) { 2760 Error(Parser.getTok().getLoc(), "'#' expected"); 2761 return MatchOperand_ParseFail; 2762 } 2763 Parser.Lex(); // Eat hash token. 2764 2765 const MCExpr *ShiftAmount; 2766 SMLoc E = Parser.getTok().getLoc(); 2767 if (getParser().ParseExpression(ShiftAmount)) { 2768 Error(E, "malformed shift expression"); 2769 return MatchOperand_ParseFail; 2770 } 2771 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2772 if (!CE) { 2773 Error(E, "shift amount must be an immediate"); 2774 return MatchOperand_ParseFail; 2775 } 2776 2777 int64_t Val = CE->getValue(); 2778 if (isASR) { 2779 // Shift amount must be in [1,32] 2780 if (Val < 1 || Val > 32) { 2781 Error(E, "'asr' shift amount must be in range [1,32]"); 2782 return MatchOperand_ParseFail; 2783 } 2784 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2785 if (isThumb() && Val == 32) { 2786 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2787 return MatchOperand_ParseFail; 2788 } 2789 if (Val == 32) Val = 0; 2790 } else { 2791 // Shift amount must be in [1,32] 2792 if (Val < 0 || Val > 31) { 2793 Error(E, "'lsr' shift amount must be in range [0,31]"); 2794 return MatchOperand_ParseFail; 2795 } 2796 } 2797 2798 E = Parser.getTok().getLoc(); 2799 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2800 2801 return MatchOperand_Success; 2802} 2803 2804/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2805/// of instructions. Legal values are: 2806/// ror #n 'n' in {0, 8, 16, 24} 2807ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2808parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2809 const AsmToken &Tok = Parser.getTok(); 2810 SMLoc S = Tok.getLoc(); 2811 if (Tok.isNot(AsmToken::Identifier)) 2812 return MatchOperand_NoMatch; 2813 StringRef ShiftName = Tok.getString(); 2814 if (ShiftName != "ror" && ShiftName != "ROR") 2815 return MatchOperand_NoMatch; 2816 Parser.Lex(); // Eat the operator. 2817 2818 // A '#' and a rotate amount. 2819 if (Parser.getTok().isNot(AsmToken::Hash)) { 2820 Error(Parser.getTok().getLoc(), "'#' expected"); 2821 return MatchOperand_ParseFail; 2822 } 2823 Parser.Lex(); // Eat hash token. 2824 2825 const MCExpr *ShiftAmount; 2826 SMLoc E = Parser.getTok().getLoc(); 2827 if (getParser().ParseExpression(ShiftAmount)) { 2828 Error(E, "malformed rotate expression"); 2829 return MatchOperand_ParseFail; 2830 } 2831 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2832 if (!CE) { 2833 Error(E, "rotate amount must be an immediate"); 2834 return MatchOperand_ParseFail; 2835 } 2836 2837 int64_t Val = CE->getValue(); 2838 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2839 // normally, zero is represented in asm by omitting the rotate operand 2840 // entirely. 
2841 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2842 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2843 return MatchOperand_ParseFail; 2844 } 2845 2846 E = Parser.getTok().getLoc(); 2847 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2848 2849 return MatchOperand_Success; 2850} 2851 2852ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2853parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2854 SMLoc S = Parser.getTok().getLoc(); 2855 // The bitfield descriptor is really two operands, the LSB and the width. 2856 if (Parser.getTok().isNot(AsmToken::Hash)) { 2857 Error(Parser.getTok().getLoc(), "'#' expected"); 2858 return MatchOperand_ParseFail; 2859 } 2860 Parser.Lex(); // Eat hash token. 2861 2862 const MCExpr *LSBExpr; 2863 SMLoc E = Parser.getTok().getLoc(); 2864 if (getParser().ParseExpression(LSBExpr)) { 2865 Error(E, "malformed immediate expression"); 2866 return MatchOperand_ParseFail; 2867 } 2868 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2869 if (!CE) { 2870 Error(E, "'lsb' operand must be an immediate"); 2871 return MatchOperand_ParseFail; 2872 } 2873 2874 int64_t LSB = CE->getValue(); 2875 // The LSB must be in the range [0,31] 2876 if (LSB < 0 || LSB > 31) { 2877 Error(E, "'lsb' operand must be in the range [0,31]"); 2878 return MatchOperand_ParseFail; 2879 } 2880 E = Parser.getTok().getLoc(); 2881 2882 // Expect another immediate operand. 2883 if (Parser.getTok().isNot(AsmToken::Comma)) { 2884 Error(Parser.getTok().getLoc(), "too few operands"); 2885 return MatchOperand_ParseFail; 2886 } 2887 Parser.Lex(); // Eat hash token. 2888 if (Parser.getTok().isNot(AsmToken::Hash)) { 2889 Error(Parser.getTok().getLoc(), "'#' expected"); 2890 return MatchOperand_ParseFail; 2891 } 2892 Parser.Lex(); // Eat hash token. 
2893 2894 const MCExpr *WidthExpr; 2895 if (getParser().ParseExpression(WidthExpr)) { 2896 Error(E, "malformed immediate expression"); 2897 return MatchOperand_ParseFail; 2898 } 2899 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2900 if (!CE) { 2901 Error(E, "'width' operand must be an immediate"); 2902 return MatchOperand_ParseFail; 2903 } 2904 2905 int64_t Width = CE->getValue(); 2906 // The LSB must be in the range [1,32-lsb] 2907 if (Width < 1 || Width > 32 - LSB) { 2908 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2909 return MatchOperand_ParseFail; 2910 } 2911 E = Parser.getTok().getLoc(); 2912 2913 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2914 2915 return MatchOperand_Success; 2916} 2917 2918ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2919parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2920 // Check for a post-index addressing register operand. Specifically: 2921 // postidx_reg := '+' register {, shift} 2922 // | '-' register {, shift} 2923 // | register {, shift} 2924 2925 // This method must return MatchOperand_NoMatch without consuming any tokens 2926 // in the case where there is no match, as other alternatives take other 2927 // parse methods. 2928 AsmToken Tok = Parser.getTok(); 2929 SMLoc S = Tok.getLoc(); 2930 bool haveEaten = false; 2931 bool isAdd = true; 2932 int Reg = -1; 2933 if (Tok.is(AsmToken::Plus)) { 2934 Parser.Lex(); // Eat the '+' token. 2935 haveEaten = true; 2936 } else if (Tok.is(AsmToken::Minus)) { 2937 Parser.Lex(); // Eat the '-' token. 
2938 isAdd = false; 2939 haveEaten = true; 2940 } 2941 if (Parser.getTok().is(AsmToken::Identifier)) 2942 Reg = tryParseRegister(); 2943 if (Reg == -1) { 2944 if (!haveEaten) 2945 return MatchOperand_NoMatch; 2946 Error(Parser.getTok().getLoc(), "register expected"); 2947 return MatchOperand_ParseFail; 2948 } 2949 SMLoc E = Parser.getTok().getLoc(); 2950 2951 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2952 unsigned ShiftImm = 0; 2953 if (Parser.getTok().is(AsmToken::Comma)) { 2954 Parser.Lex(); // Eat the ','. 2955 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2956 return MatchOperand_ParseFail; 2957 } 2958 2959 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2960 ShiftImm, S, E)); 2961 2962 return MatchOperand_Success; 2963} 2964 2965ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2966parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2967 // Check for a post-index addressing register operand. Specifically: 2968 // am3offset := '+' register 2969 // | '-' register 2970 // | register 2971 // | # imm 2972 // | # + imm 2973 // | # - imm 2974 2975 // This method must return MatchOperand_NoMatch without consuming any tokens 2976 // in the case where there is no match, as other alternatives take other 2977 // parse methods. 2978 AsmToken Tok = Parser.getTok(); 2979 SMLoc S = Tok.getLoc(); 2980 2981 // Do immediates first, as we always parse those if we have a '#'. 2982 if (Parser.getTok().is(AsmToken::Hash)) { 2983 Parser.Lex(); // Eat the '#'. 2984 // Explicitly look for a '-', as we need to encode negative zero 2985 // differently. 
2986 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2987 const MCExpr *Offset; 2988 if (getParser().ParseExpression(Offset)) 2989 return MatchOperand_ParseFail; 2990 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2991 if (!CE) { 2992 Error(S, "constant expression expected"); 2993 return MatchOperand_ParseFail; 2994 } 2995 SMLoc E = Tok.getLoc(); 2996 // Negative zero is encoded as the flag value INT32_MIN. 2997 int32_t Val = CE->getValue(); 2998 if (isNegative && Val == 0) 2999 Val = INT32_MIN; 3000 3001 Operands.push_back( 3002 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 3003 3004 return MatchOperand_Success; 3005 } 3006 3007 3008 bool haveEaten = false; 3009 bool isAdd = true; 3010 int Reg = -1; 3011 if (Tok.is(AsmToken::Plus)) { 3012 Parser.Lex(); // Eat the '+' token. 3013 haveEaten = true; 3014 } else if (Tok.is(AsmToken::Minus)) { 3015 Parser.Lex(); // Eat the '-' token. 3016 isAdd = false; 3017 haveEaten = true; 3018 } 3019 if (Parser.getTok().is(AsmToken::Identifier)) 3020 Reg = tryParseRegister(); 3021 if (Reg == -1) { 3022 if (!haveEaten) 3023 return MatchOperand_NoMatch; 3024 Error(Parser.getTok().getLoc(), "register expected"); 3025 return MatchOperand_ParseFail; 3026 } 3027 SMLoc E = Parser.getTok().getLoc(); 3028 3029 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3030 0, S, E)); 3031 3032 return MatchOperand_Success; 3033} 3034 3035/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3036/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3037/// when they refer multiple MIOperands inside a single one. 3038bool ARMAsmParser:: 3039cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3040 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3041 // Rt, Rt2 3042 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3043 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3044 // Create a writeback register dummy placeholder. 
3045 Inst.addOperand(MCOperand::CreateReg(0)); 3046 // addr 3047 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3048 // pred 3049 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3050 return true; 3051} 3052 3053/// cvtT2StrdPre - Convert parsed operands to MCInst. 3054/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3055/// when they refer multiple MIOperands inside a single one. 3056bool ARMAsmParser:: 3057cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3058 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3059 // Create a writeback register dummy placeholder. 3060 Inst.addOperand(MCOperand::CreateReg(0)); 3061 // Rt, Rt2 3062 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3063 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3064 // addr 3065 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3066 // pred 3067 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3068 return true; 3069} 3070 3071/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3072/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3073/// when they refer multiple MIOperands inside a single one. 3074bool ARMAsmParser:: 3075cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3076 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3077 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3078 3079 // Create a writeback register dummy placeholder. 3080 Inst.addOperand(MCOperand::CreateImm(0)); 3081 3082 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3083 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3084 return true; 3085} 3086 3087/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3088/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3089/// when they refer multiple MIOperands inside a single one. 
3090bool ARMAsmParser:: 3091cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3092 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3093 // Create a writeback register dummy placeholder. 3094 Inst.addOperand(MCOperand::CreateImm(0)); 3095 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3096 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3097 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3098 return true; 3099} 3100 3101/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3102/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3103/// when they refer multiple MIOperands inside a single one. 3104bool ARMAsmParser:: 3105cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3106 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3107 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3108 3109 // Create a writeback register dummy placeholder. 3110 Inst.addOperand(MCOperand::CreateImm(0)); 3111 3112 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3113 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3114 return true; 3115} 3116 3117/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 3118/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3119/// when they refer multiple MIOperands inside a single one. 3120bool ARMAsmParser:: 3121cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3122 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3123 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3124 3125 // Create a writeback register dummy placeholder. 3126 Inst.addOperand(MCOperand::CreateImm(0)); 3127 3128 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3129 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3130 return true; 3131} 3132 3133 3134/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst. 
3135/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3136/// when they refer multiple MIOperands inside a single one. 3137bool ARMAsmParser:: 3138cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode, 3139 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3140 // Create a writeback register dummy placeholder. 3141 Inst.addOperand(MCOperand::CreateImm(0)); 3142 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3143 ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2); 3144 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3145 return true; 3146} 3147 3148/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst. 3149/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3150/// when they refer multiple MIOperands inside a single one. 3151bool ARMAsmParser:: 3152cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode, 3153 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3154 // Create a writeback register dummy placeholder. 3155 Inst.addOperand(MCOperand::CreateImm(0)); 3156 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3157 ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3); 3158 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3159 return true; 3160} 3161 3162/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3163/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3164/// when they refer multiple MIOperands inside a single one. 3165bool ARMAsmParser:: 3166cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3167 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3168 // Create a writeback register dummy placeholder. 
3169 Inst.addOperand(MCOperand::CreateImm(0)); 3170 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3171 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3172 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3173 return true; 3174} 3175 3176/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst. 3177/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3178/// when they refer multiple MIOperands inside a single one. 3179bool ARMAsmParser:: 3180cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3181 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3182 // Rt 3183 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3184 // Create a writeback register dummy placeholder. 3185 Inst.addOperand(MCOperand::CreateImm(0)); 3186 // addr 3187 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3188 // offset 3189 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3190 // pred 3191 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3192 return true; 3193} 3194 3195/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst. 3196/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3197/// when they refer multiple MIOperands inside a single one. 3198bool ARMAsmParser:: 3199cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3200 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3201 // Rt 3202 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3203 // Create a writeback register dummy placeholder. 3204 Inst.addOperand(MCOperand::CreateImm(0)); 3205 // addr 3206 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3207 // offset 3208 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3209 // pred 3210 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3211 return true; 3212} 3213 3214/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst. 
3215/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3216/// when they refer multiple MIOperands inside a single one. 3217bool ARMAsmParser:: 3218cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode, 3219 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3220 // Create a writeback register dummy placeholder. 3221 Inst.addOperand(MCOperand::CreateImm(0)); 3222 // Rt 3223 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3224 // addr 3225 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3226 // offset 3227 ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1); 3228 // pred 3229 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3230 return true; 3231} 3232 3233/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst. 3234/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3235/// when they refer multiple MIOperands inside a single one. 3236bool ARMAsmParser:: 3237cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode, 3238 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3239 // Create a writeback register dummy placeholder. 3240 Inst.addOperand(MCOperand::CreateImm(0)); 3241 // Rt 3242 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3243 // addr 3244 ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1); 3245 // offset 3246 ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2); 3247 // pred 3248 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3249 return true; 3250} 3251 3252/// cvtLdrdPre - Convert parsed operands to MCInst. 3253/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3254/// when they refer multiple MIOperands inside a single one. 
3255bool ARMAsmParser:: 3256cvtLdrdPre(MCInst &Inst, unsigned Opcode, 3257 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3258 // Rt, Rt2 3259 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3260 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3261 // Create a writeback register dummy placeholder. 3262 Inst.addOperand(MCOperand::CreateImm(0)); 3263 // addr 3264 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3265 // pred 3266 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3267 return true; 3268} 3269 3270/// cvtStrdPre - Convert parsed operands to MCInst. 3271/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3272/// when they refer multiple MIOperands inside a single one. 3273bool ARMAsmParser:: 3274cvtStrdPre(MCInst &Inst, unsigned Opcode, 3275 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3276 // Create a writeback register dummy placeholder. 3277 Inst.addOperand(MCOperand::CreateImm(0)); 3278 // Rt, Rt2 3279 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3280 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3281 // addr 3282 ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3); 3283 // pred 3284 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3285 return true; 3286} 3287 3288/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst. 3289/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3290/// when they refer multiple MIOperands inside a single one. 3291bool ARMAsmParser:: 3292cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode, 3293 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3294 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3295 // Create a writeback register dummy placeholder. 
3296 Inst.addOperand(MCOperand::CreateImm(0)); 3297 ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3); 3298 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3299 return true; 3300} 3301 3302/// cvtThumbMultiple- Convert parsed operands to MCInst. 3303/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3304/// when they refer multiple MIOperands inside a single one. 3305bool ARMAsmParser:: 3306cvtThumbMultiply(MCInst &Inst, unsigned Opcode, 3307 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3308 // The second source operand must be the same register as the destination 3309 // operand. 3310 if (Operands.size() == 6 && 3311 (((ARMOperand*)Operands[3])->getReg() != 3312 ((ARMOperand*)Operands[5])->getReg()) && 3313 (((ARMOperand*)Operands[3])->getReg() != 3314 ((ARMOperand*)Operands[4])->getReg())) { 3315 Error(Operands[3]->getStartLoc(), 3316 "destination register must match source register"); 3317 return false; 3318 } 3319 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3320 ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1); 3321 ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1); 3322 // If we have a three-operand form, use that, else the second source operand 3323 // is just the destination operand again. 3324 if (Operands.size() == 6) 3325 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3326 else 3327 Inst.addOperand(Inst.getOperand(0)); 3328 ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2); 3329 3330 return true; 3331} 3332 3333bool ARMAsmParser:: 3334cvtVLDwbFixed(MCInst &Inst, unsigned Opcode, 3335 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3336 // Vd 3337 ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1); 3338 // Create a writeback register dummy placeholder. 
3339 Inst.addOperand(MCOperand::CreateImm(0)); 3340 // Vn 3341 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3342 // pred 3343 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3344 return true; 3345} 3346 3347bool ARMAsmParser:: 3348cvtVLDwbRegister(MCInst &Inst, unsigned Opcode, 3349 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3350 // Vd 3351 ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1); 3352 // Create a writeback register dummy placeholder. 3353 Inst.addOperand(MCOperand::CreateImm(0)); 3354 // Vn 3355 ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2); 3356 // Vm 3357 ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1); 3358 // pred 3359 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3360 return true; 3361} 3362 3363/// Parse an ARM memory expression, return false if successful else return true 3364/// or an error. The first token must be a '[' when called. 3365bool ARMAsmParser:: 3366parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3367 SMLoc S, E; 3368 assert(Parser.getTok().is(AsmToken::LBrac) && 3369 "Token is not a Left Bracket"); 3370 S = Parser.getTok().getLoc(); 3371 Parser.Lex(); // Eat left bracket token. 3372 3373 const AsmToken &BaseRegTok = Parser.getTok(); 3374 int BaseRegNum = tryParseRegister(); 3375 if (BaseRegNum == -1) 3376 return Error(BaseRegTok.getLoc(), "register expected"); 3377 3378 // The next token must either be a comma or a closing bracket. 3379 const AsmToken &Tok = Parser.getTok(); 3380 if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac)) 3381 return Error(Tok.getLoc(), "malformed memory operand"); 3382 3383 if (Tok.is(AsmToken::RBrac)) { 3384 E = Tok.getLoc(); 3385 Parser.Lex(); // Eat right bracket token. 3386 3387 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift, 3388 0, 0, false, S, E)); 3389 3390 // If there's a pre-indexing writeback marker, '!', just add it as a token 3391 // operand. 
It's rather odd, but syntactically valid. 3392 if (Parser.getTok().is(AsmToken::Exclaim)) { 3393 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3394 Parser.Lex(); // Eat the '!'. 3395 } 3396 3397 return false; 3398 } 3399 3400 assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!"); 3401 Parser.Lex(); // Eat the comma. 3402 3403 // If we have a ':', it's an alignment specifier. 3404 if (Parser.getTok().is(AsmToken::Colon)) { 3405 Parser.Lex(); // Eat the ':'. 3406 E = Parser.getTok().getLoc(); 3407 3408 const MCExpr *Expr; 3409 if (getParser().ParseExpression(Expr)) 3410 return true; 3411 3412 // The expression has to be a constant. Memory references with relocations 3413 // don't come through here, as they use the <label> forms of the relevant 3414 // instructions. 3415 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3416 if (!CE) 3417 return Error (E, "constant expression expected"); 3418 3419 unsigned Align = 0; 3420 switch (CE->getValue()) { 3421 default: 3422 return Error(E, "alignment specifier must be 64, 128, or 256 bits"); 3423 case 64: Align = 8; break; 3424 case 128: Align = 16; break; 3425 case 256: Align = 32; break; 3426 } 3427 3428 // Now we should have the closing ']' 3429 E = Parser.getTok().getLoc(); 3430 if (Parser.getTok().isNot(AsmToken::RBrac)) 3431 return Error(E, "']' expected"); 3432 Parser.Lex(); // Eat right bracket token. 3433 3434 // Don't worry about range checking the value here. That's handled by 3435 // the is*() predicates. 3436 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, 3437 ARM_AM::no_shift, 0, Align, 3438 false, S, E)); 3439 3440 // If there's a pre-indexing writeback marker, '!', just add it as a token 3441 // operand. 3442 if (Parser.getTok().is(AsmToken::Exclaim)) { 3443 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3444 Parser.Lex(); // Eat the '!'. 
3445 } 3446 3447 return false; 3448 } 3449 3450 // If we have a '#', it's an immediate offset, else assume it's a register 3451 // offset. 3452 if (Parser.getTok().is(AsmToken::Hash)) { 3453 Parser.Lex(); // Eat the '#'. 3454 E = Parser.getTok().getLoc(); 3455 3456 bool isNegative = getParser().getTok().is(AsmToken::Minus); 3457 const MCExpr *Offset; 3458 if (getParser().ParseExpression(Offset)) 3459 return true; 3460 3461 // The expression has to be a constant. Memory references with relocations 3462 // don't come through here, as they use the <label> forms of the relevant 3463 // instructions. 3464 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 3465 if (!CE) 3466 return Error (E, "constant expression expected"); 3467 3468 // If the constant was #-0, represent it as INT32_MIN. 3469 int32_t Val = CE->getValue(); 3470 if (isNegative && Val == 0) 3471 CE = MCConstantExpr::Create(INT32_MIN, getContext()); 3472 3473 // Now we should have the closing ']' 3474 E = Parser.getTok().getLoc(); 3475 if (Parser.getTok().isNot(AsmToken::RBrac)) 3476 return Error(E, "']' expected"); 3477 Parser.Lex(); // Eat right bracket token. 3478 3479 // Don't worry about range checking the value here. That's handled by 3480 // the is*() predicates. 3481 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0, 3482 ARM_AM::no_shift, 0, 0, 3483 false, S, E)); 3484 3485 // If there's a pre-indexing writeback marker, '!', just add it as a token 3486 // operand. 3487 if (Parser.getTok().is(AsmToken::Exclaim)) { 3488 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3489 Parser.Lex(); // Eat the '!'. 3490 } 3491 3492 return false; 3493 } 3494 3495 // The register offset is optionally preceded by a '+' or '-' 3496 bool isNegative = false; 3497 if (Parser.getTok().is(AsmToken::Minus)) { 3498 isNegative = true; 3499 Parser.Lex(); // Eat the '-'. 3500 } else if (Parser.getTok().is(AsmToken::Plus)) { 3501 // Nothing to do. 3502 Parser.Lex(); // Eat the '+'. 
3503 } 3504 3505 E = Parser.getTok().getLoc(); 3506 int OffsetRegNum = tryParseRegister(); 3507 if (OffsetRegNum == -1) 3508 return Error(E, "register expected"); 3509 3510 // If there's a shift operator, handle it. 3511 ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift; 3512 unsigned ShiftImm = 0; 3513 if (Parser.getTok().is(AsmToken::Comma)) { 3514 Parser.Lex(); // Eat the ','. 3515 if (parseMemRegOffsetShift(ShiftType, ShiftImm)) 3516 return true; 3517 } 3518 3519 // Now we should have the closing ']' 3520 E = Parser.getTok().getLoc(); 3521 if (Parser.getTok().isNot(AsmToken::RBrac)) 3522 return Error(E, "']' expected"); 3523 Parser.Lex(); // Eat right bracket token. 3524 3525 Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum, 3526 ShiftType, ShiftImm, 0, isNegative, 3527 S, E)); 3528 3529 // If there's a pre-indexing writeback marker, '!', just add it as a token 3530 // operand. 3531 if (Parser.getTok().is(AsmToken::Exclaim)) { 3532 Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc())); 3533 Parser.Lex(); // Eat the '!'. 3534 } 3535 3536 return false; 3537} 3538 3539/// parseMemRegOffsetShift - one of these two: 3540/// ( lsl | lsr | asr | ror ) , # shift_amount 3541/// rrx 3542/// return true if it parses a shift otherwise it returns false. 
3543bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3544 unsigned &Amount) { 3545 SMLoc Loc = Parser.getTok().getLoc(); 3546 const AsmToken &Tok = Parser.getTok(); 3547 if (Tok.isNot(AsmToken::Identifier)) 3548 return true; 3549 StringRef ShiftName = Tok.getString(); 3550 if (ShiftName == "lsl" || ShiftName == "LSL") 3551 St = ARM_AM::lsl; 3552 else if (ShiftName == "lsr" || ShiftName == "LSR") 3553 St = ARM_AM::lsr; 3554 else if (ShiftName == "asr" || ShiftName == "ASR") 3555 St = ARM_AM::asr; 3556 else if (ShiftName == "ror" || ShiftName == "ROR") 3557 St = ARM_AM::ror; 3558 else if (ShiftName == "rrx" || ShiftName == "RRX") 3559 St = ARM_AM::rrx; 3560 else 3561 return Error(Loc, "illegal shift operator"); 3562 Parser.Lex(); // Eat shift type token. 3563 3564 // rrx stands alone. 3565 Amount = 0; 3566 if (St != ARM_AM::rrx) { 3567 Loc = Parser.getTok().getLoc(); 3568 // A '#' and a shift amount. 3569 const AsmToken &HashTok = Parser.getTok(); 3570 if (HashTok.isNot(AsmToken::Hash)) 3571 return Error(HashTok.getLoc(), "'#' expected"); 3572 Parser.Lex(); // Eat hash token. 3573 3574 const MCExpr *Expr; 3575 if (getParser().ParseExpression(Expr)) 3576 return true; 3577 // Range check the immediate. 3578 // lsl, ror: 0 <= imm <= 31 3579 // lsr, asr: 0 <= imm <= 32 3580 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3581 if (!CE) 3582 return Error(Loc, "shift amount must be an immediate"); 3583 int64_t Imm = CE->getValue(); 3584 if (Imm < 0 || 3585 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3586 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3587 return Error(Loc, "immediate shift value out of range"); 3588 Amount = Imm; 3589 } 3590 3591 return false; 3592} 3593 3594/// parseFPImm - A floating point immediate expression operand. 
3595ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 3596parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3597 SMLoc S = Parser.getTok().getLoc(); 3598 3599 if (Parser.getTok().isNot(AsmToken::Hash)) 3600 return MatchOperand_NoMatch; 3601 3602 // Disambiguate the VMOV forms that can accept an FP immediate. 3603 // vmov.f32 <sreg>, #imm 3604 // vmov.f64 <dreg>, #imm 3605 // vmov.f32 <dreg>, #imm @ vector f32x2 3606 // vmov.f32 <qreg>, #imm @ vector f32x4 3607 // 3608 // There are also the NEON VMOV instructions which expect an 3609 // integer constant. Make sure we don't try to parse an FPImm 3610 // for these: 3611 // vmov.i{8|16|32|64} <dreg|qreg>, #imm 3612 ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]); 3613 if (!TyOp->isToken() || (TyOp->getToken() != ".f32" && 3614 TyOp->getToken() != ".f64")) 3615 return MatchOperand_NoMatch; 3616 3617 Parser.Lex(); // Eat the '#'. 3618 3619 // Handle negation, as that still comes through as a separate token. 3620 bool isNegative = false; 3621 if (Parser.getTok().is(AsmToken::Minus)) { 3622 isNegative = true; 3623 Parser.Lex(); 3624 } 3625 const AsmToken &Tok = Parser.getTok(); 3626 if (Tok.is(AsmToken::Real)) { 3627 APFloat RealVal(APFloat::IEEEdouble, Tok.getString()); 3628 uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue(); 3629 // If we had a '-' in front, toggle the sign bit. 3630 IntVal ^= (uint64_t)isNegative << 63; 3631 int Val = ARM_AM::getFP64Imm(APInt(64, IntVal)); 3632 Parser.Lex(); // Eat the token. 3633 if (Val == -1) { 3634 TokError("floating point value out of range"); 3635 return MatchOperand_ParseFail; 3636 } 3637 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3638 return MatchOperand_Success; 3639 } 3640 if (Tok.is(AsmToken::Integer)) { 3641 int64_t Val = Tok.getIntVal(); 3642 Parser.Lex(); // Eat the token. 
3643 if (Val > 255 || Val < 0) { 3644 TokError("encoded floating point value out of range"); 3645 return MatchOperand_ParseFail; 3646 } 3647 Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext())); 3648 return MatchOperand_Success; 3649 } 3650 3651 TokError("invalid floating point immediate"); 3652 return MatchOperand_ParseFail; 3653} 3654/// Parse a arm instruction operand. For now this parses the operand regardless 3655/// of the mnemonic. 3656bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands, 3657 StringRef Mnemonic) { 3658 SMLoc S, E; 3659 3660 // Check if the current operand has a custom associated parser, if so, try to 3661 // custom parse the operand, or fallback to the general approach. 3662 OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic); 3663 if (ResTy == MatchOperand_Success) 3664 return false; 3665 // If there wasn't a custom match, try the generic matcher below. Otherwise, 3666 // there was a match, but an error occurred, in which case, just return that 3667 // the operand parsing failed. 3668 if (ResTy == MatchOperand_ParseFail) 3669 return true; 3670 3671 switch (getLexer().getKind()) { 3672 default: 3673 Error(Parser.getTok().getLoc(), "unexpected token in operand"); 3674 return true; 3675 case AsmToken::Identifier: { 3676 // If this is VMRS, check for the apsr_nzcv operand. 3677 if (!tryParseRegisterWithWriteBack(Operands)) 3678 return false; 3679 int Res = tryParseShiftRegister(Operands); 3680 if (Res == 0) // success 3681 return false; 3682 else if (Res == -1) // irrecoverable error 3683 return true; 3684 if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") { 3685 S = Parser.getTok().getLoc(); 3686 Parser.Lex(); 3687 Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S)); 3688 return false; 3689 } 3690 3691 // Fall though for the Identifier case that is not a register or a 3692 // special name. 
3693 } 3694 case AsmToken::Integer: // things like 1f and 2b as a branch targets 3695 case AsmToken::Dot: { // . as a branch target 3696 // This was not a register so parse other operands that start with an 3697 // identifier (like labels) as expressions and create them as immediates. 3698 const MCExpr *IdVal; 3699 S = Parser.getTok().getLoc(); 3700 if (getParser().ParseExpression(IdVal)) 3701 return true; 3702 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3703 Operands.push_back(ARMOperand::CreateImm(IdVal, S, E)); 3704 return false; 3705 } 3706 case AsmToken::LBrac: 3707 return parseMemory(Operands); 3708 case AsmToken::LCurly: 3709 return parseRegisterList(Operands); 3710 case AsmToken::Hash: { 3711 // #42 -> immediate. 3712 // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate 3713 S = Parser.getTok().getLoc(); 3714 Parser.Lex(); 3715 bool isNegative = Parser.getTok().is(AsmToken::Minus); 3716 const MCExpr *ImmVal; 3717 if (getParser().ParseExpression(ImmVal)) 3718 return true; 3719 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal); 3720 if (!CE) { 3721 Error(S, "constant expression expected"); 3722 return MatchOperand_ParseFail; 3723 } 3724 int32_t Val = CE->getValue(); 3725 if (isNegative && Val == 0) 3726 ImmVal = MCConstantExpr::Create(INT32_MIN, getContext()); 3727 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3728 Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E)); 3729 return false; 3730 } 3731 case AsmToken::Colon: { 3732 // ":lower16:" and ":upper16:" expression prefixes 3733 // FIXME: Check it's an expression prefix, 3734 // e.g. (FOO - :lower16:BAR) isn't legal. 
3735 ARMMCExpr::VariantKind RefKind; 3736 if (parsePrefix(RefKind)) 3737 return true; 3738 3739 const MCExpr *SubExprVal; 3740 if (getParser().ParseExpression(SubExprVal)) 3741 return true; 3742 3743 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3744 getContext()); 3745 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3746 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3747 return false; 3748 } 3749 } 3750} 3751 3752// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3753// :lower16: and :upper16:. 3754bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { 3755 RefKind = ARMMCExpr::VK_ARM_None; 3756 3757 // :lower16: and :upper16: modifiers 3758 assert(getLexer().is(AsmToken::Colon) && "expected a :"); 3759 Parser.Lex(); // Eat ':' 3760 3761 if (getLexer().isNot(AsmToken::Identifier)) { 3762 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); 3763 return true; 3764 } 3765 3766 StringRef IDVal = Parser.getTok().getIdentifier(); 3767 if (IDVal == "lower16") { 3768 RefKind = ARMMCExpr::VK_ARM_LO16; 3769 } else if (IDVal == "upper16") { 3770 RefKind = ARMMCExpr::VK_ARM_HI16; 3771 } else { 3772 Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); 3773 return true; 3774 } 3775 Parser.Lex(); 3776 3777 if (getLexer().isNot(AsmToken::Colon)) { 3778 Error(Parser.getTok().getLoc(), "unexpected token after prefix"); 3779 return true; 3780 } 3781 Parser.Lex(); // Eat the last ':' 3782 return false; 3783} 3784 3785/// \brief Given a mnemonic, split out possible predication code and carry 3786/// setting letters to form a canonical mnemonic and flags. 3787// 3788// FIXME: Would be nice to autogen this. 3789// FIXME: This is a bit of a maze of special cases. 
3790StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic, 3791 unsigned &PredicationCode, 3792 bool &CarrySetting, 3793 unsigned &ProcessorIMod, 3794 StringRef &ITMask) { 3795 PredicationCode = ARMCC::AL; 3796 CarrySetting = false; 3797 ProcessorIMod = 0; 3798 3799 // Ignore some mnemonics we know aren't predicated forms. 3800 // 3801 // FIXME: Would be nice to autogen this. 3802 if ((Mnemonic == "movs" && isThumb()) || 3803 Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" || 3804 Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" || 3805 Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" || 3806 Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" || 3807 Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" || 3808 Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" || 3809 Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal") 3810 return Mnemonic; 3811 3812 // First, split out any predication code. Ignore mnemonics we know aren't 3813 // predicated but do have a carry-set and so weren't caught above. 
3814 if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" && 3815 Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" && 3816 Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" && 3817 Mnemonic != "sbcs" && Mnemonic != "rscs") { 3818 unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2)) 3819 .Case("eq", ARMCC::EQ) 3820 .Case("ne", ARMCC::NE) 3821 .Case("hs", ARMCC::HS) 3822 .Case("cs", ARMCC::HS) 3823 .Case("lo", ARMCC::LO) 3824 .Case("cc", ARMCC::LO) 3825 .Case("mi", ARMCC::MI) 3826 .Case("pl", ARMCC::PL) 3827 .Case("vs", ARMCC::VS) 3828 .Case("vc", ARMCC::VC) 3829 .Case("hi", ARMCC::HI) 3830 .Case("ls", ARMCC::LS) 3831 .Case("ge", ARMCC::GE) 3832 .Case("lt", ARMCC::LT) 3833 .Case("gt", ARMCC::GT) 3834 .Case("le", ARMCC::LE) 3835 .Case("al", ARMCC::AL) 3836 .Default(~0U); 3837 if (CC != ~0U) { 3838 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2); 3839 PredicationCode = CC; 3840 } 3841 } 3842 3843 // Next, determine if we have a carry setting bit. We explicitly ignore all 3844 // the instructions we know end in 's'. 3845 if (Mnemonic.endswith("s") && 3846 !(Mnemonic == "cps" || Mnemonic == "mls" || 3847 Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" || 3848 Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" || 3849 Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" || 3850 Mnemonic == "vrsqrts" || Mnemonic == "srs" || 3851 (Mnemonic == "movs" && isThumb()))) { 3852 Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1); 3853 CarrySetting = true; 3854 } 3855 3856 // The "cps" instruction can have a interrupt mode operand which is glued into 3857 // the mnemonic. Check if this is the case, split it and parse the imod op 3858 if (Mnemonic.startswith("cps")) { 3859 // Split out any imod code. 
3860 unsigned IMod = 3861 StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2)) 3862 .Case("ie", ARM_PROC::IE) 3863 .Case("id", ARM_PROC::ID) 3864 .Default(~0U); 3865 if (IMod != ~0U) { 3866 Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2); 3867 ProcessorIMod = IMod; 3868 } 3869 } 3870 3871 // The "it" instruction has the condition mask on the end of the mnemonic. 3872 if (Mnemonic.startswith("it")) { 3873 ITMask = Mnemonic.slice(2, Mnemonic.size()); 3874 Mnemonic = Mnemonic.slice(0, 2); 3875 } 3876 3877 return Mnemonic; 3878} 3879 3880/// \brief Given a canonical mnemonic, determine if the instruction ever allows 3881/// inclusion of carry set or predication code operands. 3882// 3883// FIXME: It would be nice to autogen this. 3884void ARMAsmParser:: 3885getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet, 3886 bool &CanAcceptPredicationCode) { 3887 if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" || 3888 Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" || 3889 Mnemonic == "add" || Mnemonic == "adc" || 3890 Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" || 3891 Mnemonic == "orr" || Mnemonic == "mvn" || 3892 Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" || 3893 Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" || 3894 (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" || 3895 Mnemonic == "mla" || Mnemonic == "smlal" || 3896 Mnemonic == "umlal" || Mnemonic == "umull"))) { 3897 CanAcceptCarrySet = true; 3898 } else 3899 CanAcceptCarrySet = false; 3900 3901 if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" || 3902 Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" || 3903 Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" || 3904 Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" || 3905 Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" || 3906 (Mnemonic == "clrex" && !isThumb()) || 3907 (Mnemonic == "nop" && 
isThumbOne()) || 3908 ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" || 3909 Mnemonic == "ldc2" || Mnemonic == "ldc2l" || 3910 Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) || 3911 ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) && 3912 !isThumb()) || 3913 Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) { 3914 CanAcceptPredicationCode = false; 3915 } else 3916 CanAcceptPredicationCode = true; 3917 3918 if (isThumb()) { 3919 if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" || 3920 Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp") 3921 CanAcceptPredicationCode = false; 3922 } 3923} 3924 3925bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic, 3926 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3927 // FIXME: This is all horribly hacky. We really need a better way to deal 3928 // with optional operands like this in the matcher table. 3929 3930 // The 'mov' mnemonic is special. One variant has a cc_out operand, while 3931 // another does not. Specifically, the MOVW instruction does not. So we 3932 // special case it here and remove the defaulted (non-setting) cc_out 3933 // operand if that's the instruction we're trying to match. 3934 // 3935 // We do this as post-processing of the explicit operands rather than just 3936 // conditionally adding the cc_out in the first place because we need 3937 // to check the type of the parsed immediate operand. 3938 if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() && 3939 !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() && 3940 static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() && 3941 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3942 return true; 3943 3944 // Register-register 'add' for thumb does not have a cc_out operand 3945 // when there are only two register operands. 
3946 if (isThumb() && Mnemonic == "add" && Operands.size() == 5 && 3947 static_cast<ARMOperand*>(Operands[3])->isReg() && 3948 static_cast<ARMOperand*>(Operands[4])->isReg() && 3949 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 3950 return true; 3951 // Register-register 'add' for thumb does not have a cc_out operand 3952 // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do 3953 // have to check the immediate range here since Thumb2 has a variant 3954 // that can handle a different range and has a cc_out operand. 3955 if (((isThumb() && Mnemonic == "add") || 3956 (isThumbTwo() && Mnemonic == "sub")) && 3957 Operands.size() == 6 && 3958 static_cast<ARMOperand*>(Operands[3])->isReg() && 3959 static_cast<ARMOperand*>(Operands[4])->isReg() && 3960 static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP && 3961 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 3962 (static_cast<ARMOperand*>(Operands[5])->isReg() || 3963 static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4())) 3964 return true; 3965 // For Thumb2, add/sub immediate does not have a cc_out operand for the 3966 // imm0_4095 variant. That's the least-preferred variant when 3967 // selecting via the generic "add" mnemonic, so to know that we 3968 // should remove the cc_out operand, we have to explicitly check that 3969 // it's not one of the other variants. Ugh. 3970 if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") && 3971 Operands.size() == 6 && 3972 static_cast<ARMOperand*>(Operands[3])->isReg() && 3973 static_cast<ARMOperand*>(Operands[4])->isReg() && 3974 static_cast<ARMOperand*>(Operands[5])->isImm()) { 3975 // Nest conditions rather than one big 'if' statement for readability. 3976 // 3977 // If either register is a high reg, it's either one of the SP 3978 // variants (handled above) or a 32-bit encoding, so we just 3979 // check against T3. 
3980 if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 3981 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) && 3982 static_cast<ARMOperand*>(Operands[5])->isT2SOImm()) 3983 return false; 3984 // If both registers are low, we're in an IT block, and the immediate is 3985 // in range, we should use encoding T1 instead, which has a cc_out. 3986 if (inITBlock() && 3987 isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) && 3988 isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) && 3989 static_cast<ARMOperand*>(Operands[5])->isImm0_7()) 3990 return false; 3991 3992 // Otherwise, we use encoding T4, which does not have a cc_out 3993 // operand. 3994 return true; 3995 } 3996 3997 // The thumb2 multiply instruction doesn't have a CCOut register, so 3998 // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to 3999 // use the 16-bit encoding or not. 4000 if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 && 4001 static_cast<ARMOperand*>(Operands[1])->getReg() == 0 && 4002 static_cast<ARMOperand*>(Operands[3])->isReg() && 4003 static_cast<ARMOperand*>(Operands[4])->isReg() && 4004 static_cast<ARMOperand*>(Operands[5])->isReg() && 4005 // If the registers aren't low regs, the destination reg isn't the 4006 // same as one of the source regs, or the cc_out operand is zero 4007 // outside of an IT block, we have to use the 32-bit encoding, so 4008 // remove the cc_out operand. 
4009 (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) || 4010 !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) || 4011 !inITBlock() || 4012 (static_cast<ARMOperand*>(Operands[3])->getReg() != 4013 static_cast<ARMOperand*>(Operands[5])->getReg() && 4014 static_cast<ARMOperand*>(Operands[3])->getReg() != 4015 static_cast<ARMOperand*>(Operands[4])->getReg()))) 4016 return true; 4017 4018 4019 4020 // Register-register 'add/sub' for thumb does not have a cc_out operand 4021 // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also 4022 // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't 4023 // right, this will result in better diagnostics (which operand is off) 4024 // anyway. 4025 if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") && 4026 (Operands.size() == 5 || Operands.size() == 6) && 4027 static_cast<ARMOperand*>(Operands[3])->isReg() && 4028 static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP && 4029 static_cast<ARMOperand*>(Operands[1])->getReg() == 0) 4030 return true; 4031 4032 return false; 4033} 4034 4035/// Parse an arm instruction mnemonic followed by its operands. 4036bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc, 4037 SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4038 // Create the leading tokens for the mnemonic, split by '.' characters. 4039 size_t Start = 0, Next = Name.find('.'); 4040 StringRef Mnemonic = Name.slice(Start, Next); 4041 4042 // Split out the predication code and carry setting flag from the mnemonic. 4043 unsigned PredicationCode; 4044 unsigned ProcessorIMod; 4045 bool CarrySetting; 4046 StringRef ITMask; 4047 Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting, 4048 ProcessorIMod, ITMask); 4049 4050 // In Thumb1, only the branch (B) instruction can be predicated. 
4051 if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") { 4052 Parser.EatToEndOfStatement(); 4053 return Error(NameLoc, "conditional execution not supported in Thumb1"); 4054 } 4055 4056 Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc)); 4057 4058 // Handle the IT instruction ITMask. Convert it to a bitmask. This 4059 // is the mask as it will be for the IT encoding if the conditional 4060 // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case 4061 // where the conditional bit0 is zero, the instruction post-processing 4062 // will adjust the mask accordingly. 4063 if (Mnemonic == "it") { 4064 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2); 4065 if (ITMask.size() > 3) { 4066 Parser.EatToEndOfStatement(); 4067 return Error(Loc, "too many conditions on IT instruction"); 4068 } 4069 unsigned Mask = 8; 4070 for (unsigned i = ITMask.size(); i != 0; --i) { 4071 char pos = ITMask[i - 1]; 4072 if (pos != 't' && pos != 'e') { 4073 Parser.EatToEndOfStatement(); 4074 return Error(Loc, "illegal IT block condition mask '" + ITMask + "'"); 4075 } 4076 Mask >>= 1; 4077 if (ITMask[i - 1] == 't') 4078 Mask |= 8; 4079 } 4080 Operands.push_back(ARMOperand::CreateITMask(Mask, Loc)); 4081 } 4082 4083 // FIXME: This is all a pretty gross hack. We should automatically handle 4084 // optional operands like this via tblgen. 4085 4086 // Next, add the CCOut and ConditionCode operands, if needed. 4087 // 4088 // For mnemonics which can ever incorporate a carry setting bit or predication 4089 // code, our matching model involves us always generating CCOut and 4090 // ConditionCode operands to match the mnemonic "as written" and then we let 4091 // the matcher deal with finding the right instruction or generating an 4092 // appropriate error. 
4093 bool CanAcceptCarrySet, CanAcceptPredicationCode; 4094 getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode); 4095 4096 // If we had a carry-set on an instruction that can't do that, issue an 4097 // error. 4098 if (!CanAcceptCarrySet && CarrySetting) { 4099 Parser.EatToEndOfStatement(); 4100 return Error(NameLoc, "instruction '" + Mnemonic + 4101 "' can not set flags, but 's' suffix specified"); 4102 } 4103 // If we had a predication code on an instruction that can't do that, issue an 4104 // error. 4105 if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) { 4106 Parser.EatToEndOfStatement(); 4107 return Error(NameLoc, "instruction '" + Mnemonic + 4108 "' is not predicable, but condition code specified"); 4109 } 4110 4111 // Add the carry setting operand, if necessary. 4112 if (CanAcceptCarrySet) { 4113 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size()); 4114 Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0, 4115 Loc)); 4116 } 4117 4118 // Add the predication code operand, if necessary. 4119 if (CanAcceptPredicationCode) { 4120 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() + 4121 CarrySetting); 4122 Operands.push_back(ARMOperand::CreateCondCode( 4123 ARMCC::CondCodes(PredicationCode), Loc)); 4124 } 4125 4126 // Add the processor imod operand, if necessary. 4127 if (ProcessorIMod) { 4128 Operands.push_back(ARMOperand::CreateImm( 4129 MCConstantExpr::Create(ProcessorIMod, getContext()), 4130 NameLoc, NameLoc)); 4131 } 4132 4133 // Add the remaining tokens in the mnemonic. 4134 while (Next != StringRef::npos) { 4135 Start = Next; 4136 Next = Name.find('.', Start + 1); 4137 StringRef ExtraToken = Name.slice(Start, Next); 4138 4139 // For now, we're only parsing Thumb1 (for the most part), so 4140 // just ignore ".n" qualifiers. We'll use them to restrict 4141 // matching when we do Thumb2. 
4142 if (ExtraToken != ".n") { 4143 SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start); 4144 Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc)); 4145 } 4146 } 4147 4148 // Read the remaining operands. 4149 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4150 // Read the first operand. 4151 if (parseOperand(Operands, Mnemonic)) { 4152 Parser.EatToEndOfStatement(); 4153 return true; 4154 } 4155 4156 while (getLexer().is(AsmToken::Comma)) { 4157 Parser.Lex(); // Eat the comma. 4158 4159 // Parse and remember the operand. 4160 if (parseOperand(Operands, Mnemonic)) { 4161 Parser.EatToEndOfStatement(); 4162 return true; 4163 } 4164 } 4165 } 4166 4167 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4168 SMLoc Loc = getLexer().getLoc(); 4169 Parser.EatToEndOfStatement(); 4170 return Error(Loc, "unexpected token in argument list"); 4171 } 4172 4173 Parser.Lex(); // Consume the EndOfStatement 4174 4175 // Some instructions, mostly Thumb, have forms for the same mnemonic that 4176 // do and don't have a cc_out optional-def operand. With some spot-checks 4177 // of the operand list, we can figure out which variant we're trying to 4178 // parse and adjust accordingly before actually matching. We shouldn't ever 4179 // try to remove a cc_out operand that was explicitly set on the the 4180 // mnemonic, of course (CarrySetting == true). Reason number #317 the 4181 // table driven matcher doesn't fit well with the ARM instruction set. 4182 if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) { 4183 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 4184 Operands.erase(Operands.begin() + 1); 4185 delete Op; 4186 } 4187 4188 // ARM mode 'blx' need special handling, as the register operand version 4189 // is predicable, but the label operand version is not. So, we can't rely 4190 // on the Mnemonic based checking to correctly figure out when to put 4191 // a k_CondCode operand in the list. 
If we're trying to match the label 4192 // version, remove the k_CondCode operand here. 4193 if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 && 4194 static_cast<ARMOperand*>(Operands[2])->isImm()) { 4195 ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]); 4196 Operands.erase(Operands.begin() + 1); 4197 delete Op; 4198 } 4199 4200 // The vector-compare-to-zero instructions have a literal token "#0" at 4201 // the end that comes to here as an immediate operand. Convert it to a 4202 // token to play nicely with the matcher. 4203 if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" || 4204 Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 && 4205 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4206 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 4207 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4208 if (CE && CE->getValue() == 0) { 4209 Operands.erase(Operands.begin() + 5); 4210 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4211 delete Op; 4212 } 4213 } 4214 // VCMP{E} does the same thing, but with a different operand count. 4215 if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 && 4216 static_cast<ARMOperand*>(Operands[4])->isImm()) { 4217 ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]); 4218 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4219 if (CE && CE->getValue() == 0) { 4220 Operands.erase(Operands.begin() + 4); 4221 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4222 delete Op; 4223 } 4224 } 4225 // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the 4226 // end. Convert it to a token here. 
4227 if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 && 4228 static_cast<ARMOperand*>(Operands[5])->isImm()) { 4229 ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]); 4230 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm()); 4231 if (CE && CE->getValue() == 0) { 4232 Operands.erase(Operands.begin() + 5); 4233 Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc())); 4234 delete Op; 4235 } 4236 } 4237 4238 return false; 4239} 4240 4241// Validate context-sensitive operand constraints. 4242 4243// return 'true' if register list contains non-low GPR registers, 4244// 'false' otherwise. If Reg is in the register list or is HiReg, set 4245// 'containsReg' to true. 4246static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg, 4247 unsigned HiReg, bool &containsReg) { 4248 containsReg = false; 4249 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4250 unsigned OpReg = Inst.getOperand(i).getReg(); 4251 if (OpReg == Reg) 4252 containsReg = true; 4253 // Anything other than a low register isn't legal here. 4254 if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg)) 4255 return true; 4256 } 4257 return false; 4258} 4259 4260// Check if the specified regisgter is in the register list of the inst, 4261// starting at the indicated operand number. 4262static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) { 4263 for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) { 4264 unsigned OpReg = Inst.getOperand(i).getReg(); 4265 if (OpReg == Reg) 4266 return true; 4267 } 4268 return false; 4269} 4270 4271// FIXME: We would really prefer to have MCInstrInfo (the wrapper around 4272// the ARMInsts array) instead. Getting that here requires awkward 4273// API changes, though. Better way? 
4274namespace llvm { 4275extern const MCInstrDesc ARMInsts[]; 4276} 4277static const MCInstrDesc &getInstDesc(unsigned Opcode) { 4278 return ARMInsts[Opcode]; 4279} 4280 4281// FIXME: We would really like to be able to tablegen'erate this. 4282bool ARMAsmParser:: 4283validateInstruction(MCInst &Inst, 4284 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4285 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4286 SMLoc Loc = Operands[0]->getStartLoc(); 4287 // Check the IT block state first. 4288 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4289 // being allowed in IT blocks, but not being predicable. It just always 4290 // executes. 4291 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4292 unsigned bit = 1; 4293 if (ITState.FirstCond) 4294 ITState.FirstCond = false; 4295 else 4296 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4297 // The instruction must be predicable. 4298 if (!MCID.isPredicable()) 4299 return Error(Loc, "instructions in IT block must be predicable"); 4300 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4301 unsigned ITCond = bit ? ITState.Cond : 4302 ARMCC::getOppositeCondition(ITState.Cond); 4303 if (Cond != ITCond) { 4304 // Find the condition code Operand to get its SMLoc information. 4305 SMLoc CondLoc; 4306 for (unsigned i = 1; i < Operands.size(); ++i) 4307 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4308 CondLoc = Operands[i]->getStartLoc(); 4309 return Error(CondLoc, "incorrect condition in IT block; got '" + 4310 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4311 "', but expected '" + 4312 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4313 } 4314 // Check for non-'al' condition codes outside of the IT block. 
4315 } else if (isThumbTwo() && MCID.isPredicable() && 4316 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4317 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4318 Inst.getOpcode() != ARM::t2B) 4319 return Error(Loc, "predicated instructions must be in IT block"); 4320 4321 switch (Inst.getOpcode()) { 4322 case ARM::LDRD: 4323 case ARM::LDRD_PRE: 4324 case ARM::LDRD_POST: 4325 case ARM::LDREXD: { 4326 // Rt2 must be Rt + 1. 4327 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4328 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4329 if (Rt2 != Rt + 1) 4330 return Error(Operands[3]->getStartLoc(), 4331 "destination operands must be sequential"); 4332 return false; 4333 } 4334 case ARM::STRD: { 4335 // Rt2 must be Rt + 1. 4336 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4337 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4338 if (Rt2 != Rt + 1) 4339 return Error(Operands[3]->getStartLoc(), 4340 "source operands must be sequential"); 4341 return false; 4342 } 4343 case ARM::STRD_PRE: 4344 case ARM::STRD_POST: 4345 case ARM::STREXD: { 4346 // Rt2 must be Rt + 1. 4347 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4348 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4349 if (Rt2 != Rt + 1) 4350 return Error(Operands[3]->getStartLoc(), 4351 "source operands must be sequential"); 4352 return false; 4353 } 4354 case ARM::SBFX: 4355 case ARM::UBFX: { 4356 // width must be in range [1, 32-lsb] 4357 unsigned lsb = Inst.getOperand(2).getImm(); 4358 unsigned widthm1 = Inst.getOperand(3).getImm(); 4359 if (widthm1 >= 32 - lsb) 4360 return Error(Operands[5]->getStartLoc(), 4361 "bitfield width must be in range [1,32-lsb]"); 4362 return false; 4363 } 4364 case ARM::tLDMIA: { 4365 // If we're parsing Thumb2, the .w variant is available and handles 4366 // most cases that are normally illegal for a Thumb1 LDM 4367 // instruction. 
We'll make the transformation in processInstruction() 4368 // if necessary. 4369 // 4370 // Thumb LDM instructions are writeback iff the base register is not 4371 // in the register list. 4372 unsigned Rn = Inst.getOperand(0).getReg(); 4373 bool hasWritebackToken = 4374 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4375 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4376 bool listContainsBase; 4377 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4378 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4379 "registers must be in range r0-r7"); 4380 // If we should have writeback, then there should be a '!' token. 4381 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4382 return Error(Operands[2]->getStartLoc(), 4383 "writeback operator '!' expected"); 4384 // If we should not have writeback, there must not be a '!'. This is 4385 // true even for the 32-bit wide encodings. 4386 if (listContainsBase && hasWritebackToken) 4387 return Error(Operands[3]->getStartLoc(), 4388 "writeback operator '!' not allowed when base register " 4389 "in register list"); 4390 4391 break; 4392 } 4393 case ARM::t2LDMIA_UPD: { 4394 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4395 return Error(Operands[4]->getStartLoc(), 4396 "writeback operator '!' 
not allowed when base register " 4397 "in register list"); 4398 break; 4399 } 4400 case ARM::tPOP: { 4401 bool listContainsBase; 4402 if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase)) 4403 return Error(Operands[2]->getStartLoc(), 4404 "registers must be in range r0-r7 or pc"); 4405 break; 4406 } 4407 case ARM::tPUSH: { 4408 bool listContainsBase; 4409 if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase)) 4410 return Error(Operands[2]->getStartLoc(), 4411 "registers must be in range r0-r7 or lr"); 4412 break; 4413 } 4414 case ARM::tSTMIA_UPD: { 4415 bool listContainsBase; 4416 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4417 return Error(Operands[4]->getStartLoc(), 4418 "registers must be in range r0-r7"); 4419 break; 4420 } 4421 } 4422 4423 return false; 4424} 4425 4426void ARMAsmParser:: 4427processInstruction(MCInst &Inst, 4428 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4429 switch (Inst.getOpcode()) { 4430 case ARM::LDMIA_UPD: 4431 // If this is a load of a single register via a 'pop', then we should use 4432 // a post-indexed LDR instruction instead, per the ARM ARM. 4433 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4434 Inst.getNumOperands() == 5) { 4435 MCInst TmpInst; 4436 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4437 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4438 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4439 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4440 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4441 TmpInst.addOperand(MCOperand::CreateImm(4)); 4442 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4443 TmpInst.addOperand(Inst.getOperand(3)); 4444 Inst = TmpInst; 4445 } 4446 break; 4447 case ARM::STMDB_UPD: 4448 // If this is a store of a single register via a 'push', then we should use 4449 // a pre-indexed STR instruction instead, per the ARM ARM. 
    if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" &&
        Inst.getNumOperands() == 5) {
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::STR_PRE_IMM);
      TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb
      TmpInst.addOperand(Inst.getOperand(4)); // Rt
      TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12
      TmpInst.addOperand(MCOperand::CreateImm(-4)); // pre-decrement by 4
      TmpInst.addOperand(Inst.getOperand(2)); // CondCode
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  case ARM::tADDi8:
    // If the immediate is in the range 0-7, we want tADDi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    // Operands.size() == 6 means mnemonic, cc, Rd, Rn, imm were all given.
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tADDi3);
    break;
  case ARM::tSUBi8:
    // If the immediate is in the range 0-7, we want tSUBi3 iff Rd was
    // explicitly specified. From the ARM ARM: "Encoding T1 is preferred
    // to encoding T2 if <Rd> is specified and encoding T2 is preferred
    // to encoding T1 if <Rd> is omitted."
    if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6)
      Inst.setOpcode(ARM::tSUBi3);
    break;
  case ARM::tB:
    // A Thumb conditional branch outside of an IT block is a tBcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::tBcc);
    break;
  case ARM::t2B:
    // A Thumb2 conditional branch outside of an IT block is a t2Bcc.
    if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock())
      Inst.setOpcode(ARM::t2Bcc);
    break;
  case ARM::t2Bcc:
    // If the conditional is AL or we're in an IT block, we really want t2B.
    if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock())
      Inst.setOpcode(ARM::t2B);
    break;
  case ARM::tBcc:
    // If the conditional is AL, we really want tB.
    if (Inst.getOperand(1).getImm() == ARMCC::AL)
      Inst.setOpcode(ARM::tB);
    break;
  case ARM::tLDMIA: {
    // If the register list contains any high registers, or if the writeback
    // doesn't match what tLDMIA can do, we need to use the 32-bit encoding
    // instead if we're in Thumb2. Otherwise, this should have generated
    // an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool hasWritebackToken =
      (static_cast<ARMOperand*>(Operands[3])->isToken() &&
       static_cast<ARMOperand*>(Operands[3])->getToken() == "!");
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) ||
        (!listContainsBase && !hasWritebackToken) ||
        (listContainsBase && hasWritebackToken)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA);
      // If we're switching to the updating version, we need to insert
      // the writeback tied operand.
      if (hasWritebackToken)
        Inst.insert(Inst.begin(),
                    MCOperand::CreateReg(Inst.getOperand(0).getReg()));
    }
    break;
  }
  case ARM::tSTMIA_UPD: {
    // If the register list contains any high registers, we need to use
    // the 32-bit encoding instead if we're in Thumb2. Otherwise, this
    // should have generated an error in validateInstruction().
    unsigned Rn = Inst.getOperand(0).getReg();
    bool listContainsBase;
    if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) {
      // 16-bit encoding isn't sufficient. Switch to the 32-bit version.
      assert (isThumbTwo());
      Inst.setOpcode(ARM::t2STMIA_UPD);
    }
    break;
  }
  case ARM::t2MOVi: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    // Requirements: low destination register, imm8, and flag-setting
    // behavior consistent with tMOVi8 (sets flags outside an IT block,
    // doesn't inside one). Operands[2] is the ".w" width suffix if present.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        Inst.getOperand(1).getImm() <= 255 &&
        ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL &&
         Inst.getOperand(4).getReg() == ARM::CPSR) ||
        (inITBlock() && Inst.getOperand(4).getReg() == 0)) &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't in the same order for tMOVi8...
      MCInst TmpInst;
      TmpInst.setOpcode(ARM::tMOVi8);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(4));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2MOVr: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == ARMCC::AL &&
        Inst.getOperand(4).getReg() == ARM::CPSR &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      // The operands aren't the same for tMOV[S]r... (no cc_out)
      MCInst TmpInst;
      TmpInst.setOpcode(Inst.getOperand(4).getReg() ? ARM::tMOVSr : ARM::tMOVr);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(2));
      TmpInst.addOperand(Inst.getOperand(3));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2SXTH:
  case ARM::t2SXTB:
  case ARM::t2UXTH:
  case ARM::t2UXTB: {
    // If we can use the 16-bit encoding and the user didn't explicitly
    // request the 32-bit variant, transform it here. The 16-bit forms
    // require both registers low and a zero rotate.
    if (isARMLowRegister(Inst.getOperand(0).getReg()) &&
        isARMLowRegister(Inst.getOperand(1).getReg()) &&
        Inst.getOperand(2).getImm() == 0 &&
        (!static_cast<ARMOperand*>(Operands[2])->isToken() ||
         static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) {
      unsigned NewOpc;
      switch (Inst.getOpcode()) {
      default: llvm_unreachable("Illegal opcode!");
      case ARM::t2SXTH: NewOpc = ARM::tSXTH; break;
      case ARM::t2SXTB: NewOpc = ARM::tSXTB; break;
      case ARM::t2UXTH: NewOpc = ARM::tUXTH; break;
      case ARM::t2UXTB: NewOpc = ARM::tUXTB; break;
      }
      // The operands aren't the same for thumb1 (no rotate operand).
      MCInst TmpInst;
      TmpInst.setOpcode(NewOpc);
      TmpInst.addOperand(Inst.getOperand(0));
      TmpInst.addOperand(Inst.getOperand(1));
      TmpInst.addOperand(Inst.getOperand(3));
      TmpInst.addOperand(Inst.getOperand(4));
      Inst = TmpInst;
    }
    break;
  }
  case ARM::t2IT: {
    // The mask bits for all but the first condition are represented as
    // the low bit of the condition code value implies 't'. We currently
    // always have 1 implies 't', so XOR toggle the bits if the low bit
    // of the condition code is zero.
The encoding also expects the low 4613 // bit of the condition to be encoded as bit 4 of the mask operand, 4614 // so mask that in if needed 4615 MCOperand &MO = Inst.getOperand(1); 4616 unsigned Mask = MO.getImm(); 4617 unsigned OrigMask = Mask; 4618 unsigned TZ = CountTrailingZeros_32(Mask); 4619 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4620 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4621 for (unsigned i = 3; i != TZ; --i) 4622 Mask ^= 1 << i; 4623 } else 4624 Mask |= 0x10; 4625 MO.setImm(Mask); 4626 4627 // Set up the IT block state according to the IT instruction we just 4628 // matched. 4629 assert(!inITBlock() && "nested IT blocks?!"); 4630 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4631 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4632 ITState.CurPosition = 0; 4633 ITState.FirstCond = true; 4634 break; 4635 } 4636 } 4637} 4638 4639unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4640 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4641 // suffix depending on whether they're in an IT block or not. 4642 unsigned Opc = Inst.getOpcode(); 4643 const MCInstrDesc &MCID = getInstDesc(Opc); 4644 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4645 assert(MCID.hasOptionalDef() && 4646 "optionally flag setting instruction missing optional def operand"); 4647 assert(MCID.NumOperands == Inst.getNumOperands() && 4648 "operand count mismatch!"); 4649 // Find the optional-def operand (cc_out). 4650 unsigned OpNo; 4651 for (OpNo = 0; 4652 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4653 ++OpNo) 4654 ; 4655 // If we're parsing Thumb1, reject it completely. 4656 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4657 return Match_MnemonicFail; 4658 // If we're parsing Thumb2, which form is legal depends on whether we're 4659 // in an IT block. 
4660 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4661 !inITBlock()) 4662 return Match_RequiresITBlock; 4663 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4664 inITBlock()) 4665 return Match_RequiresNotITBlock; 4666 } 4667 // Some high-register supporting Thumb1 encodings only allow both registers 4668 // to be from r0-r7 when in Thumb2. 4669 else if (Opc == ARM::tADDhirr && isThumbOne() && 4670 isARMLowRegister(Inst.getOperand(1).getReg()) && 4671 isARMLowRegister(Inst.getOperand(2).getReg())) 4672 return Match_RequiresThumb2; 4673 // Others only require ARMv6 or later. 4674 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4675 isARMLowRegister(Inst.getOperand(0).getReg()) && 4676 isARMLowRegister(Inst.getOperand(1).getReg())) 4677 return Match_RequiresV6; 4678 return Match_Success; 4679} 4680 4681bool ARMAsmParser:: 4682MatchAndEmitInstruction(SMLoc IDLoc, 4683 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4684 MCStreamer &Out) { 4685 MCInst Inst; 4686 unsigned ErrorInfo; 4687 unsigned MatchResult; 4688 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4689 switch (MatchResult) { 4690 default: break; 4691 case Match_Success: 4692 // Context sensitive operand constraints aren't handled by the matcher, 4693 // so check them here. 4694 if (validateInstruction(Inst, Operands)) { 4695 // Still progress the IT block, otherwise one wrong condition causes 4696 // nasty cascading errors. 4697 forwardITPosition(); 4698 return true; 4699 } 4700 4701 // Some instructions need post-processing to, for example, tweak which 4702 // encoding is selected. 4703 processInstruction(Inst, Operands); 4704 4705 // Only move forward at the very end so that everything in validate 4706 // and process gets a consistent answer about whether we're in an IT 4707 // block. 
    forwardITPosition();

    Out.EmitInstruction(Inst);
    return false;
  case Match_MissingFeature:
    Error(IDLoc, "instruction requires a CPU feature not currently enabled");
    return true;
  case Match_InvalidOperand: {
    // Point the diagnostic at the offending operand when the matcher
    // identified one; otherwise fall back to the instruction location.
    SMLoc ErrorLoc = IDLoc;
    if (ErrorInfo != ~0U) {
      if (ErrorInfo >= Operands.size())
        return Error(IDLoc, "too few operands for instruction");

      ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc();
      if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc;
    }

    return Error(ErrorLoc, "invalid operand for instruction");
  }
  case Match_MnemonicFail:
    return Error(IDLoc, "invalid instruction");
  case Match_ConversionFail:
    // The converter function will have already emitted a diagnostic.
    return true;
  case Match_RequiresNotITBlock:
    return Error(IDLoc, "flag setting instruction only valid outside IT block");
  case Match_RequiresITBlock:
    return Error(IDLoc, "instruction only valid inside IT block");
  case Match_RequiresV6:
    return Error(IDLoc, "instruction variant requires ARMv6 or later");
  case Match_RequiresThumb2:
    return Error(IDLoc, "instruction variant requires Thumb2");
  }

  llvm_unreachable("Implement any new match types added!");
  return true;
}

/// parseDirective parses the arm specific directives
/// Returns true (not handled / error) for any directive not recognized here.
bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) {
  StringRef IDVal = DirectiveID.getIdentifier();
  if (IDVal == ".word")
    return parseDirectiveWord(4, DirectiveID.getLoc());
  else if (IDVal == ".thumb")
    return parseDirectiveThumb(DirectiveID.getLoc());
  else if (IDVal == ".thumb_func")
    return parseDirectiveThumbFunc(DirectiveID.getLoc());
  else if (IDVal == ".code")
    return parseDirectiveCode(DirectiveID.getLoc());
  else if (IDVal == ".syntax")
    return parseDirectiveSyntax(DirectiveID.getLoc());
  return true;
}

/// parseDirectiveWord
4763/// ::= .word [ expression (, expression)* ] 4764bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 4765 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4766 for (;;) { 4767 const MCExpr *Value; 4768 if (getParser().ParseExpression(Value)) 4769 return true; 4770 4771 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 4772 4773 if (getLexer().is(AsmToken::EndOfStatement)) 4774 break; 4775 4776 // FIXME: Improve diagnostic. 4777 if (getLexer().isNot(AsmToken::Comma)) 4778 return Error(L, "unexpected token in directive"); 4779 Parser.Lex(); 4780 } 4781 } 4782 4783 Parser.Lex(); 4784 return false; 4785} 4786 4787/// parseDirectiveThumb 4788/// ::= .thumb 4789bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 4790 if (getLexer().isNot(AsmToken::EndOfStatement)) 4791 return Error(L, "unexpected token in directive"); 4792 Parser.Lex(); 4793 4794 // TODO: set thumb mode 4795 // TODO: tell the MC streamer the mode 4796 // getParser().getStreamer().Emit???(); 4797 return false; 4798} 4799 4800/// parseDirectiveThumbFunc 4801/// ::= .thumbfunc symbol_name 4802bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 4803 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 4804 bool isMachO = MAI.hasSubsectionsViaSymbols(); 4805 StringRef Name; 4806 4807 // Darwin asm has function name after .thumb_func direction 4808 // ELF doesn't 4809 if (isMachO) { 4810 const AsmToken &Tok = Parser.getTok(); 4811 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 4812 return Error(L, "unexpected token in .thumb_func directive"); 4813 Name = Tok.getString(); 4814 Parser.Lex(); // Consume the identifier token. 
4815 } 4816 4817 if (getLexer().isNot(AsmToken::EndOfStatement)) 4818 return Error(L, "unexpected token in directive"); 4819 Parser.Lex(); 4820 4821 // FIXME: assuming function name will be the line following .thumb_func 4822 if (!isMachO) { 4823 Name = Parser.getTok().getString(); 4824 } 4825 4826 // Mark symbol as a thumb symbol. 4827 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4828 getParser().getStreamer().EmitThumbFunc(Func); 4829 return false; 4830} 4831 4832/// parseDirectiveSyntax 4833/// ::= .syntax unified | divided 4834bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4835 const AsmToken &Tok = Parser.getTok(); 4836 if (Tok.isNot(AsmToken::Identifier)) 4837 return Error(L, "unexpected token in .syntax directive"); 4838 StringRef Mode = Tok.getString(); 4839 if (Mode == "unified" || Mode == "UNIFIED") 4840 Parser.Lex(); 4841 else if (Mode == "divided" || Mode == "DIVIDED") 4842 return Error(L, "'.syntax divided' arm asssembly not supported"); 4843 else 4844 return Error(L, "unrecognized syntax mode in .syntax directive"); 4845 4846 if (getLexer().isNot(AsmToken::EndOfStatement)) 4847 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4848 Parser.Lex(); 4849 4850 // TODO tell the MC streamer the mode 4851 // getParser().getStreamer().Emit???(); 4852 return false; 4853} 4854 4855/// parseDirectiveCode 4856/// ::= .code 16 | 32 4857bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4858 const AsmToken &Tok = Parser.getTok(); 4859 if (Tok.isNot(AsmToken::Integer)) 4860 return Error(L, "unexpected token in .code directive"); 4861 int64_t Val = Parser.getTok().getIntVal(); 4862 if (Val == 16) 4863 Parser.Lex(); 4864 else if (Val == 32) 4865 Parser.Lex(); 4866 else 4867 return Error(L, "invalid operand to .code directive"); 4868 4869 if (getLexer().isNot(AsmToken::EndOfStatement)) 4870 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4871 Parser.Lex(); 4872 4873 if (Val == 16) { 4874 if 
(!isThumb()) 4875 SwitchMode(); 4876 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 4877 } else { 4878 if (isThumb()) 4879 SwitchMode(); 4880 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 4881 } 4882 4883 return false; 4884} 4885 4886extern "C" void LLVMInitializeARMAsmLexer(); 4887 4888/// Force static initialization. 4889extern "C" void LLVMInitializeARMAsmParser() { 4890 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 4891 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 4892 LLVMInitializeARMAsmLexer(); 4893} 4894 4895#define GET_REGISTER_MATCHER 4896#define GET_MATCHER_IMPLEMENTATION 4897#include "ARMGenAsmMatcher.inc" 4898