ARMAsmParser.cpp revision a581328ceb4c9db165d79a4dabd6b28db799d70f
//===-- ARMAsmParser.cpp - Parse ARM assembly to MCInst instructions ------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "MCTargetDesc/ARMBaseInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "MCTargetDesc/ARMMCExpr.h"
#include "llvm/MC/MCParser/MCAsmLexer.h"
#include "llvm/MC/MCParser/MCAsmParser.h"
#include "llvm/MC/MCParser/MCParsedAsmOperand.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCStreamer.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/MC/MCSubtargetInfo.h"
#include "llvm/MC/MCTargetAsmParser.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/SourceMgr.h"
#include "llvm/Support/TargetRegistry.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/OwningPtr.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringSwitch.h"
#include "llvm/ADT/Twine.h"

using namespace llvm;

namespace {

class ARMOperand;

/// ARMAsmParser - Target assembly parser for ARM and Thumb. Implements the
/// MCTargetAsmParser hooks (ParseRegister, ParseInstruction, ParseDirective,
/// MatchAndEmitInstruction) and carries the IT-block state that predicated
/// Thumb-2 instructions are validated against while parsing.
class ARMAsmParser : public MCTargetAsmParser {
  MCSubtargetInfo &STI;  // Subtarget feature bits (ARM vs. Thumb, arch level).
  MCAsmParser &Parser;   // Generic parser we delegate lexing/diagnostics to.

  // State of the IT (If-Then) block currently being parsed, if any.
  struct {
    ARMCC::CondCodes Cond;    // Condition for IT block.
    unsigned Mask:4;          // Condition mask for instructions.
                              // Starting at first 1 (from lsb).
                              //   '1'  condition as indicated in IT.
                              //   '0'  inverse of condition (else).
                              // Count of instructions in IT block is
                              // 4 - trailingzeroes(mask)

    bool FirstCond;           // Explicit flag for when we're parsing the
                              // First instruction in the IT block. It's
                              // implied in the mask, so needs special
                              // handling.

    unsigned CurPosition;     // Current position in parsing of IT
                              // block. In range [0,3]. Initialized
                              // according to count of instructions in block.
                              // ~0U if no active IT block.
  } ITState;

  /// inITBlock - True while an IT block is active (CurPosition != ~0U).
  bool inITBlock() { return ITState.CurPosition != ~0U; }

  /// forwardITPosition - Advance to the next slot of the active IT block,
  /// closing the block when all of its instructions have been seen.
  void forwardITPosition() {
    if (!inITBlock()) return;
    // Move to the next instruction in the IT block, if there is one. If not,
    // mark the block as done.
    // NOTE(review): the terminating value 5 - TZ is one past the 4 - TZ
    // instruction count documented on Mask above; presumably position 0 is
    // occupied by the IT instruction itself — confirm against the IT handling
    // in ParseInstruction.
    unsigned TZ = CountTrailingZeros_32(ITState.Mask);
    if (++ITState.CurPosition == 5 - TZ)
      ITState.CurPosition = ~0U; // Done with the IT block after this.
  }

  MCAsmParser &getParser() const { return Parser; }
  MCAsmLexer &getLexer() const { return Parser.getLexer(); }

  // Diagnostic helpers forwarding to the generic parser. Error() returns
  // whatever Parser.Error returns so callers can propagate failure.
  void Warning(SMLoc L, const Twine &Msg) { Parser.Warning(L, Msg); }
  bool Error(SMLoc L, const Twine &Msg) { return Parser.Error(L, Msg); }

  // Low-level parsing helpers. The tryParse* variants are speculative;
  // the parse* variants report errors (bool result: true == failure,
  // following the MCAsmParser convention used by Error() above).
  int tryParseRegister();
  bool tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &);
  int tryParseShiftRegister(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &);
  bool parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &, StringRef Mnemonic);
  bool parsePrefix(ARMMCExpr::VariantKind &RefKind);
  bool parseMemRegOffsetShift(ARM_AM::ShiftOpc &ShiftType,
                              unsigned &ShiftAmount);

  // Directive handlers (.word, .thumb, .thumb_func, .code, .syntax).
  bool parseDirectiveWord(unsigned Size, SMLoc L);
  bool parseDirectiveThumb(SMLoc L);
  bool parseDirectiveThumbFunc(SMLoc L);
  bool parseDirectiveCode(SMLoc L);
  bool parseDirectiveSyntax(SMLoc L);

  /// splitMnemonic - Split a full mnemonic into its base plus predication
  /// code, carry-setting suffix, processor-mode modifier, and IT mask.
  StringRef splitMnemonic(StringRef Mnemonic, unsigned &PredicationCode,
                          bool &CarrySetting, unsigned &ProcessorIMod,
                          StringRef &ITMask);
  /// getMnemonicAcceptInfo - Report whether the mnemonic can carry an 's'
  /// (carry-set) suffix and/or a predication code.
  void getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                             bool &CanAcceptPredicationCode);

  // Subtarget feature queries, derived from STI's feature bits.
  bool isThumb() const {
    // FIXME: Can tablegen auto-generate this?
    return (STI.getFeatureBits() & ARM::ModeThumb) != 0;
  }
  bool isThumbOne() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2) == 0;
  }
  bool isThumbTwo() const {
    return isThumb() && (STI.getFeatureBits() & ARM::FeatureThumb2);
  }
  bool hasV6Ops() const {
    return STI.getFeatureBits() & ARM::HasV6Ops;
  }
  bool hasV7Ops() const {
    return STI.getFeatureBits() & ARM::HasV7Ops;
  }
  /// SwitchMode - Toggle between ARM and Thumb mode and recompute the
  /// available-feature mask accordingly.
  void SwitchMode() {
    unsigned FB = ComputeAvailableFeatures(STI.ToggleFeature(ARM::ModeThumb));
    setAvailableFeatures(FB);
  }
  bool isMClass() const {
    return STI.getFeatureBits() & ARM::FeatureMClass;
  }

  /// @name Auto-generated Match Functions
  /// {

#define GET_ASSEMBLER_HEADER
#include "ARMGenAsmMatcher.inc"

  /// }

  // Custom operand parsers, invoked via the auto-generated matcher above.
  OperandMatchResultTy parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocNumOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocRegOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseCoprocOptionOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMemBarrierOptOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseProcIFlagsOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseMSRMaskOperand(
    SmallVectorImpl<MCParsedAsmOperand*>&);
  /// parsePKHImm - Parse a PKH shift operand whose shift keyword is Op and
  /// whose amount must lie in [Low, High].
  OperandMatchResultTy parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &O,
                                   StringRef Op, int Low, int High);
  // PKH 'lsl' shift amount: [0, 31].
  OperandMatchResultTy parsePKHLSLImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "lsl", 0, 31);
  }
  // PKH 'asr' shift amount: [1, 32].
  OperandMatchResultTy parsePKHASRImm(SmallVectorImpl<MCParsedAsmOperand*> &O) {
    return parsePKHImm(O, "asr", 1, 32);
  }
  OperandMatchResultTy parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseRotImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseBitfield(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseFPImm(SmallVectorImpl<MCParsedAsmOperand*>&);
  OperandMatchResultTy parseVectorList(SmallVectorImpl<MCParsedAsmOperand*>&);

  // Asm Match Converter Methods
  bool cvtT2LdrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtT2StrdPre(MCInst &Inst, unsigned Opcode,
                    const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                             const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtStrdPre(MCInst &Inst, unsigned Opcode,
                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                                  const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
                     const SmallVectorImpl<MCParsedAsmOperand*> &);
  bool cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &);

  // Post-match hooks: semantic validation and target-specific rewriting of
  // the matched MCInst, plus the heuristic for dropping an optional cc_out.
  bool validateInstruction(MCInst &Inst,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  void processInstruction(MCInst &Inst,
                          const SmallVectorImpl<MCParsedAsmOperand*> &Ops);
  bool shouldOmitCCOutOperand(StringRef Mnemonic,
                              SmallVectorImpl<MCParsedAsmOperand*> &Operands);

public:
  // Target-specific match-result codes, returned by
  // checkTargetMatchPredicate to refine generic match failures.
  enum ARMMatchResultTy {
    Match_RequiresITBlock = FIRST_TARGET_MATCH_RESULT_TY,
    Match_RequiresNotITBlock,
    Match_RequiresV6,
    Match_RequiresThumb2
  };

  ARMAsmParser(MCSubtargetInfo &_STI, MCAsmParser &_Parser)
    : MCTargetAsmParser(), STI(_STI), Parser(_Parser) {
    MCAsmParserExtension::Initialize(_Parser);

    // Initialize the set of available features.
    setAvailableFeatures(ComputeAvailableFeatures(STI.getFeatureBits()));

    // Not in an ITBlock to start with.
    ITState.CurPosition = ~0U;
  }

  // Implementation of the MCTargetAsmParser interface:
  bool ParseRegister(unsigned &RegNo, SMLoc &StartLoc, SMLoc &EndLoc);
  bool ParseInstruction(StringRef Name, SMLoc NameLoc,
                        SmallVectorImpl<MCParsedAsmOperand*> &Operands);
  bool ParseDirective(AsmToken DirectiveID);

  unsigned checkTargetMatchPredicate(MCInst &Inst);

  bool MatchAndEmitInstruction(SMLoc IDLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                               MCStreamer &Out);
};
} // end anonymous namespace

namespace {

/// ARMOperand - Instances of this class represent a parsed ARM machine
/// instruction.
class ARMOperand : public MCParsedAsmOperand {
  // Discriminator for the union below: which flavor of operand this is.
  enum KindTy {
    k_CondCode,
    k_CCOut,
    k_ITCondMask,
    k_CoprocNum,
    k_CoprocReg,
    k_CoprocOption,
    k_Immediate,
    k_FPImmediate,
    k_MemBarrierOpt,
    k_Memory,
    k_PostIndexRegister,
    k_MSRMask,
    k_ProcIFlags,
    k_VectorIndex,
    k_Register,
    k_RegisterList,
    k_DPRRegisterList,
    k_SPRRegisterList,
    k_VectorList,
    k_ShiftedRegister,
    k_ShiftedImmediate,
    k_ShifterImmediate,
    k_RotateImmediate,
    k_BitfieldDescriptor,
    k_Token
  } Kind;

  // Source range of the operand's tokens, for diagnostics.
  SMLoc StartLoc, EndLoc;
  // Backing storage for the k_*RegisterList kinds. Kept outside the union
  // because it has a non-trivial type.
  SmallVector<unsigned, 8> Registers;

  // Payload; the active member is selected by Kind.
  union {
    struct {
      ARMCC::CondCodes Val;
    } CC;

    struct {
      unsigned Val;
    } Cop;

    struct {
      unsigned Val;
    } CoprocOption;

    struct {
      unsigned Mask:4;
    } ITMask;

    struct {
      ARM_MB::MemBOpt Val;
    } MBOpt;

    struct {
      ARM_PROC::IFlags Val;
    } IFlags;

    struct {
      unsigned Val;
    } MMask;

    struct {
      const char *Data;
      unsigned Length;
    } Tok;

    struct {
      unsigned RegNum;
    } Reg;

    // A vector register list is a sequential list of 1 to 4 registers.
321 struct { 322 unsigned RegNum; 323 unsigned Count; 324 } VectorList; 325 326 struct { 327 unsigned Val; 328 } VectorIndex; 329 330 struct { 331 const MCExpr *Val; 332 } Imm; 333 334 struct { 335 unsigned Val; // encoded 8-bit representation 336 } FPImm; 337 338 /// Combined record for all forms of ARM address expressions. 339 struct { 340 unsigned BaseRegNum; 341 // Offset is in OffsetReg or OffsetImm. If both are zero, no offset 342 // was specified. 343 const MCConstantExpr *OffsetImm; // Offset immediate value 344 unsigned OffsetRegNum; // Offset register num, when OffsetImm == NULL 345 ARM_AM::ShiftOpc ShiftType; // Shift type for OffsetReg 346 unsigned ShiftImm; // shift for OffsetReg. 347 unsigned Alignment; // 0 = no alignment specified 348 // n = alignment in bytes (8, 16, or 32) 349 unsigned isNegative : 1; // Negated OffsetReg? (~'U' bit) 350 } Memory; 351 352 struct { 353 unsigned RegNum; 354 bool isAdd; 355 ARM_AM::ShiftOpc ShiftTy; 356 unsigned ShiftImm; 357 } PostIdxReg; 358 359 struct { 360 bool isASR; 361 unsigned Imm; 362 } ShifterImm; 363 struct { 364 ARM_AM::ShiftOpc ShiftTy; 365 unsigned SrcReg; 366 unsigned ShiftReg; 367 unsigned ShiftImm; 368 } RegShiftedReg; 369 struct { 370 ARM_AM::ShiftOpc ShiftTy; 371 unsigned SrcReg; 372 unsigned ShiftImm; 373 } RegShiftedImm; 374 struct { 375 unsigned Imm; 376 } RotImm; 377 struct { 378 unsigned LSB; 379 unsigned Width; 380 } Bitfield; 381 }; 382 383 ARMOperand(KindTy K) : MCParsedAsmOperand(), Kind(K) {} 384public: 385 ARMOperand(const ARMOperand &o) : MCParsedAsmOperand() { 386 Kind = o.Kind; 387 StartLoc = o.StartLoc; 388 EndLoc = o.EndLoc; 389 switch (Kind) { 390 case k_CondCode: 391 CC = o.CC; 392 break; 393 case k_ITCondMask: 394 ITMask = o.ITMask; 395 break; 396 case k_Token: 397 Tok = o.Tok; 398 break; 399 case k_CCOut: 400 case k_Register: 401 Reg = o.Reg; 402 break; 403 case k_RegisterList: 404 case k_DPRRegisterList: 405 case k_SPRRegisterList: 406 Registers = o.Registers; 407 break; 408 
case k_VectorList: 409 VectorList = o.VectorList; 410 break; 411 case k_CoprocNum: 412 case k_CoprocReg: 413 Cop = o.Cop; 414 break; 415 case k_CoprocOption: 416 CoprocOption = o.CoprocOption; 417 break; 418 case k_Immediate: 419 Imm = o.Imm; 420 break; 421 case k_FPImmediate: 422 FPImm = o.FPImm; 423 break; 424 case k_MemBarrierOpt: 425 MBOpt = o.MBOpt; 426 break; 427 case k_Memory: 428 Memory = o.Memory; 429 break; 430 case k_PostIndexRegister: 431 PostIdxReg = o.PostIdxReg; 432 break; 433 case k_MSRMask: 434 MMask = o.MMask; 435 break; 436 case k_ProcIFlags: 437 IFlags = o.IFlags; 438 break; 439 case k_ShifterImmediate: 440 ShifterImm = o.ShifterImm; 441 break; 442 case k_ShiftedRegister: 443 RegShiftedReg = o.RegShiftedReg; 444 break; 445 case k_ShiftedImmediate: 446 RegShiftedImm = o.RegShiftedImm; 447 break; 448 case k_RotateImmediate: 449 RotImm = o.RotImm; 450 break; 451 case k_BitfieldDescriptor: 452 Bitfield = o.Bitfield; 453 break; 454 case k_VectorIndex: 455 VectorIndex = o.VectorIndex; 456 break; 457 } 458 } 459 460 /// getStartLoc - Get the location of the first token of this operand. 461 SMLoc getStartLoc() const { return StartLoc; } 462 /// getEndLoc - Get the location of the last token of this operand. 
463 SMLoc getEndLoc() const { return EndLoc; } 464 465 ARMCC::CondCodes getCondCode() const { 466 assert(Kind == k_CondCode && "Invalid access!"); 467 return CC.Val; 468 } 469 470 unsigned getCoproc() const { 471 assert((Kind == k_CoprocNum || Kind == k_CoprocReg) && "Invalid access!"); 472 return Cop.Val; 473 } 474 475 StringRef getToken() const { 476 assert(Kind == k_Token && "Invalid access!"); 477 return StringRef(Tok.Data, Tok.Length); 478 } 479 480 unsigned getReg() const { 481 assert((Kind == k_Register || Kind == k_CCOut) && "Invalid access!"); 482 return Reg.RegNum; 483 } 484 485 const SmallVectorImpl<unsigned> &getRegList() const { 486 assert((Kind == k_RegisterList || Kind == k_DPRRegisterList || 487 Kind == k_SPRRegisterList) && "Invalid access!"); 488 return Registers; 489 } 490 491 const MCExpr *getImm() const { 492 assert(Kind == k_Immediate && "Invalid access!"); 493 return Imm.Val; 494 } 495 496 unsigned getFPImm() const { 497 assert(Kind == k_FPImmediate && "Invalid access!"); 498 return FPImm.Val; 499 } 500 501 unsigned getVectorIndex() const { 502 assert(Kind == k_VectorIndex && "Invalid access!"); 503 return VectorIndex.Val; 504 } 505 506 ARM_MB::MemBOpt getMemBarrierOpt() const { 507 assert(Kind == k_MemBarrierOpt && "Invalid access!"); 508 return MBOpt.Val; 509 } 510 511 ARM_PROC::IFlags getProcIFlags() const { 512 assert(Kind == k_ProcIFlags && "Invalid access!"); 513 return IFlags.Val; 514 } 515 516 unsigned getMSRMask() const { 517 assert(Kind == k_MSRMask && "Invalid access!"); 518 return MMask.Val; 519 } 520 521 bool isCoprocNum() const { return Kind == k_CoprocNum; } 522 bool isCoprocReg() const { return Kind == k_CoprocReg; } 523 bool isCoprocOption() const { return Kind == k_CoprocOption; } 524 bool isCondCode() const { return Kind == k_CondCode; } 525 bool isCCOut() const { return Kind == k_CCOut; } 526 bool isITMask() const { return Kind == k_ITCondMask; } 527 bool isITCondCode() const { return Kind == k_CondCode; } 528 bool isImm() 
const { return Kind == k_Immediate; } 529 bool isFPImm() const { return Kind == k_FPImmediate; } 530 bool isImm8s4() const { 531 if (Kind != k_Immediate) 532 return false; 533 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 534 if (!CE) return false; 535 int64_t Value = CE->getValue(); 536 return ((Value & 3) == 0) && Value >= -1020 && Value <= 1020; 537 } 538 bool isImm0_1020s4() const { 539 if (Kind != k_Immediate) 540 return false; 541 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 542 if (!CE) return false; 543 int64_t Value = CE->getValue(); 544 return ((Value & 3) == 0) && Value >= 0 && Value <= 1020; 545 } 546 bool isImm0_508s4() const { 547 if (Kind != k_Immediate) 548 return false; 549 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 550 if (!CE) return false; 551 int64_t Value = CE->getValue(); 552 return ((Value & 3) == 0) && Value >= 0 && Value <= 508; 553 } 554 bool isImm0_255() const { 555 if (Kind != k_Immediate) 556 return false; 557 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 558 if (!CE) return false; 559 int64_t Value = CE->getValue(); 560 return Value >= 0 && Value < 256; 561 } 562 bool isImm0_7() const { 563 if (Kind != k_Immediate) 564 return false; 565 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 566 if (!CE) return false; 567 int64_t Value = CE->getValue(); 568 return Value >= 0 && Value < 8; 569 } 570 bool isImm0_15() const { 571 if (Kind != k_Immediate) 572 return false; 573 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 574 if (!CE) return false; 575 int64_t Value = CE->getValue(); 576 return Value >= 0 && Value < 16; 577 } 578 bool isImm0_31() const { 579 if (Kind != k_Immediate) 580 return false; 581 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 582 if (!CE) return false; 583 int64_t Value = CE->getValue(); 584 return Value >= 0 && Value < 32; 585 } 586 bool isImm1_16() const { 587 if (Kind != k_Immediate) 588 return false; 
589 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 590 if (!CE) return false; 591 int64_t Value = CE->getValue(); 592 return Value > 0 && Value < 17; 593 } 594 bool isImm1_32() const { 595 if (Kind != k_Immediate) 596 return false; 597 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 598 if (!CE) return false; 599 int64_t Value = CE->getValue(); 600 return Value > 0 && Value < 33; 601 } 602 bool isImm0_65535() const { 603 if (Kind != k_Immediate) 604 return false; 605 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 606 if (!CE) return false; 607 int64_t Value = CE->getValue(); 608 return Value >= 0 && Value < 65536; 609 } 610 bool isImm0_65535Expr() const { 611 if (Kind != k_Immediate) 612 return false; 613 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 614 // If it's not a constant expression, it'll generate a fixup and be 615 // handled later. 616 if (!CE) return true; 617 int64_t Value = CE->getValue(); 618 return Value >= 0 && Value < 65536; 619 } 620 bool isImm24bit() const { 621 if (Kind != k_Immediate) 622 return false; 623 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 624 if (!CE) return false; 625 int64_t Value = CE->getValue(); 626 return Value >= 0 && Value <= 0xffffff; 627 } 628 bool isImmThumbSR() const { 629 if (Kind != k_Immediate) 630 return false; 631 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 632 if (!CE) return false; 633 int64_t Value = CE->getValue(); 634 return Value > 0 && Value < 33; 635 } 636 bool isPKHLSLImm() const { 637 if (Kind != k_Immediate) 638 return false; 639 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 640 if (!CE) return false; 641 int64_t Value = CE->getValue(); 642 return Value >= 0 && Value < 32; 643 } 644 bool isPKHASRImm() const { 645 if (Kind != k_Immediate) 646 return false; 647 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 648 if (!CE) return false; 649 int64_t Value = CE->getValue(); 650 
return Value > 0 && Value <= 32; 651 } 652 bool isARMSOImm() const { 653 if (Kind != k_Immediate) 654 return false; 655 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 656 if (!CE) return false; 657 int64_t Value = CE->getValue(); 658 return ARM_AM::getSOImmVal(Value) != -1; 659 } 660 bool isT2SOImm() const { 661 if (Kind != k_Immediate) 662 return false; 663 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 664 if (!CE) return false; 665 int64_t Value = CE->getValue(); 666 return ARM_AM::getT2SOImmVal(Value) != -1; 667 } 668 bool isSetEndImm() const { 669 if (Kind != k_Immediate) 670 return false; 671 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 672 if (!CE) return false; 673 int64_t Value = CE->getValue(); 674 return Value == 1 || Value == 0; 675 } 676 bool isReg() const { return Kind == k_Register; } 677 bool isRegList() const { return Kind == k_RegisterList; } 678 bool isDPRRegList() const { return Kind == k_DPRRegisterList; } 679 bool isSPRRegList() const { return Kind == k_SPRRegisterList; } 680 bool isToken() const { return Kind == k_Token; } 681 bool isMemBarrierOpt() const { return Kind == k_MemBarrierOpt; } 682 bool isMemory() const { return Kind == k_Memory; } 683 bool isShifterImm() const { return Kind == k_ShifterImmediate; } 684 bool isRegShiftedReg() const { return Kind == k_ShiftedRegister; } 685 bool isRegShiftedImm() const { return Kind == k_ShiftedImmediate; } 686 bool isRotImm() const { return Kind == k_RotateImmediate; } 687 bool isBitfield() const { return Kind == k_BitfieldDescriptor; } 688 bool isPostIdxRegShifted() const { return Kind == k_PostIndexRegister; } 689 bool isPostIdxReg() const { 690 return Kind == k_PostIndexRegister && PostIdxReg.ShiftTy == ARM_AM::no_shift; 691 } 692 bool isMemNoOffset(bool alignOK = false) const { 693 if (!isMemory()) 694 return false; 695 // No offset of any kind. 
696 return Memory.OffsetRegNum == 0 && Memory.OffsetImm == 0 && 697 (alignOK || Memory.Alignment == 0); 698 } 699 bool isAlignedMemory() const { 700 return isMemNoOffset(true); 701 } 702 bool isAddrMode2() const { 703 if (!isMemory() || Memory.Alignment != 0) return false; 704 // Check for register offset. 705 if (Memory.OffsetRegNum) return true; 706 // Immediate offset in range [-4095, 4095]. 707 if (!Memory.OffsetImm) return true; 708 int64_t Val = Memory.OffsetImm->getValue(); 709 return Val > -4096 && Val < 4096; 710 } 711 bool isAM2OffsetImm() const { 712 if (Kind != k_Immediate) 713 return false; 714 // Immediate offset in range [-4095, 4095]. 715 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 716 if (!CE) return false; 717 int64_t Val = CE->getValue(); 718 return Val > -4096 && Val < 4096; 719 } 720 bool isAddrMode3() const { 721 if (!isMemory() || Memory.Alignment != 0) return false; 722 // No shifts are legal for AM3. 723 if (Memory.ShiftType != ARM_AM::no_shift) return false; 724 // Check for register offset. 725 if (Memory.OffsetRegNum) return true; 726 // Immediate offset in range [-255, 255]. 727 if (!Memory.OffsetImm) return true; 728 int64_t Val = Memory.OffsetImm->getValue(); 729 return Val > -256 && Val < 256; 730 } 731 bool isAM3Offset() const { 732 if (Kind != k_Immediate && Kind != k_PostIndexRegister) 733 return false; 734 if (Kind == k_PostIndexRegister) 735 return PostIdxReg.ShiftTy == ARM_AM::no_shift; 736 // Immediate offset in range [-255, 255]. 737 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 738 if (!CE) return false; 739 int64_t Val = CE->getValue(); 740 // Special case, #-0 is INT32_MIN. 741 return (Val > -256 && Val < 256) || Val == INT32_MIN; 742 } 743 bool isAddrMode5() const { 744 if (!isMemory() || Memory.Alignment != 0) return false; 745 // Check for register offset. 746 if (Memory.OffsetRegNum) return false; 747 // Immediate offset in range [-1020, 1020] and a multiple of 4. 
748 if (!Memory.OffsetImm) return true; 749 int64_t Val = Memory.OffsetImm->getValue(); 750 return (Val >= -1020 && Val <= 1020 && ((Val & 3) == 0)) || 751 Val == INT32_MIN; 752 } 753 bool isMemTBB() const { 754 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 755 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 756 return false; 757 return true; 758 } 759 bool isMemTBH() const { 760 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 761 Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm != 1 || 762 Memory.Alignment != 0 ) 763 return false; 764 return true; 765 } 766 bool isMemRegOffset() const { 767 if (!isMemory() || !Memory.OffsetRegNum || Memory.Alignment != 0) 768 return false; 769 return true; 770 } 771 bool isT2MemRegOffset() const { 772 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 773 Memory.Alignment != 0) 774 return false; 775 // Only lsl #{0, 1, 2, 3} allowed. 776 if (Memory.ShiftType == ARM_AM::no_shift) 777 return true; 778 if (Memory.ShiftType != ARM_AM::lsl || Memory.ShiftImm > 3) 779 return false; 780 return true; 781 } 782 bool isMemThumbRR() const { 783 // Thumb reg+reg addressing is simple. Just two registers, a base and 784 // an offset. No shifts, negations or any other complicating factors. 785 if (!isMemory() || !Memory.OffsetRegNum || Memory.isNegative || 786 Memory.ShiftType != ARM_AM::no_shift || Memory.Alignment != 0) 787 return false; 788 return isARMLowRegister(Memory.BaseRegNum) && 789 (!Memory.OffsetRegNum || isARMLowRegister(Memory.OffsetRegNum)); 790 } 791 bool isMemThumbRIs4() const { 792 if (!isMemory() || Memory.OffsetRegNum != 0 || 793 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 794 return false; 795 // Immediate offset, multiple of 4 in range [0, 124]. 
796 if (!Memory.OffsetImm) return true; 797 int64_t Val = Memory.OffsetImm->getValue(); 798 return Val >= 0 && Val <= 124 && (Val % 4) == 0; 799 } 800 bool isMemThumbRIs2() const { 801 if (!isMemory() || Memory.OffsetRegNum != 0 || 802 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 803 return false; 804 // Immediate offset, multiple of 4 in range [0, 62]. 805 if (!Memory.OffsetImm) return true; 806 int64_t Val = Memory.OffsetImm->getValue(); 807 return Val >= 0 && Val <= 62 && (Val % 2) == 0; 808 } 809 bool isMemThumbRIs1() const { 810 if (!isMemory() || Memory.OffsetRegNum != 0 || 811 !isARMLowRegister(Memory.BaseRegNum) || Memory.Alignment != 0) 812 return false; 813 // Immediate offset in range [0, 31]. 814 if (!Memory.OffsetImm) return true; 815 int64_t Val = Memory.OffsetImm->getValue(); 816 return Val >= 0 && Val <= 31; 817 } 818 bool isMemThumbSPI() const { 819 if (!isMemory() || Memory.OffsetRegNum != 0 || 820 Memory.BaseRegNum != ARM::SP || Memory.Alignment != 0) 821 return false; 822 // Immediate offset, multiple of 4 in range [0, 1020]. 823 if (!Memory.OffsetImm) return true; 824 int64_t Val = Memory.OffsetImm->getValue(); 825 return Val >= 0 && Val <= 1020 && (Val % 4) == 0; 826 } 827 bool isMemImm8s4Offset() const { 828 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 829 return false; 830 // Immediate offset a multiple of 4 in range [-1020, 1020]. 831 if (!Memory.OffsetImm) return true; 832 int64_t Val = Memory.OffsetImm->getValue(); 833 return Val >= -1020 && Val <= 1020 && (Val & 3) == 0; 834 } 835 bool isMemImm0_1020s4Offset() const { 836 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 837 return false; 838 // Immediate offset a multiple of 4 in range [0, 1020]. 
839 if (!Memory.OffsetImm) return true; 840 int64_t Val = Memory.OffsetImm->getValue(); 841 return Val >= 0 && Val <= 1020 && (Val & 3) == 0; 842 } 843 bool isMemImm8Offset() const { 844 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 845 return false; 846 // Immediate offset in range [-255, 255]. 847 if (!Memory.OffsetImm) return true; 848 int64_t Val = Memory.OffsetImm->getValue(); 849 return (Val == INT32_MIN) || (Val > -256 && Val < 256); 850 } 851 bool isMemPosImm8Offset() const { 852 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 853 return false; 854 // Immediate offset in range [0, 255]. 855 if (!Memory.OffsetImm) return true; 856 int64_t Val = Memory.OffsetImm->getValue(); 857 return Val >= 0 && Val < 256; 858 } 859 bool isMemNegImm8Offset() const { 860 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 861 return false; 862 // Immediate offset in range [-255, -1]. 863 if (!Memory.OffsetImm) return true; 864 int64_t Val = Memory.OffsetImm->getValue(); 865 return Val > -256 && Val < 0; 866 } 867 bool isMemUImm12Offset() const { 868 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 869 return false; 870 // Immediate offset in range [0, 4095]. 871 if (!Memory.OffsetImm) return true; 872 int64_t Val = Memory.OffsetImm->getValue(); 873 return (Val >= 0 && Val < 4096); 874 } 875 bool isMemImm12Offset() const { 876 // If we have an immediate that's not a constant, treat it as a label 877 // reference needing a fixup. If it is a constant, it's something else 878 // and we reject it. 879 if (Kind == k_Immediate && !isa<MCConstantExpr>(getImm())) 880 return true; 881 882 if (!isMemory() || Memory.OffsetRegNum != 0 || Memory.Alignment != 0) 883 return false; 884 // Immediate offset in range [-4095, 4095]. 
885 if (!Memory.OffsetImm) return true; 886 int64_t Val = Memory.OffsetImm->getValue(); 887 return (Val > -4096 && Val < 4096) || (Val == INT32_MIN); 888 } 889 bool isPostIdxImm8() const { 890 if (Kind != k_Immediate) 891 return false; 892 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 893 if (!CE) return false; 894 int64_t Val = CE->getValue(); 895 return (Val > -256 && Val < 256) || (Val == INT32_MIN); 896 } 897 bool isPostIdxImm8s4() const { 898 if (Kind != k_Immediate) 899 return false; 900 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 901 if (!CE) return false; 902 int64_t Val = CE->getValue(); 903 return ((Val & 3) == 0 && Val >= -1020 && Val <= 1020) || 904 (Val == INT32_MIN); 905 } 906 907 bool isMSRMask() const { return Kind == k_MSRMask; } 908 bool isProcIFlags() const { return Kind == k_ProcIFlags; } 909 910 // NEON operands. 911 bool isVecListOneD() const { 912 if (Kind != k_VectorList) return false; 913 return VectorList.Count == 1; 914 } 915 916 bool isVecListTwoD() const { 917 if (Kind != k_VectorList) return false; 918 return VectorList.Count == 2; 919 } 920 921 bool isVecListThreeD() const { 922 if (Kind != k_VectorList) return false; 923 return VectorList.Count == 3; 924 } 925 926 bool isVecListFourD() const { 927 if (Kind != k_VectorList) return false; 928 return VectorList.Count == 4; 929 } 930 931 bool isVecListTwoQ() const { 932 if (Kind != k_VectorList) return false; 933 //FIXME: We haven't taught the parser to handle by-two register lists 934 // yet, so don't pretend to know one. 
935 return VectorList.Count == 2 && false; 936 } 937 938 bool isVectorIndex8() const { 939 if (Kind != k_VectorIndex) return false; 940 return VectorIndex.Val < 8; 941 } 942 bool isVectorIndex16() const { 943 if (Kind != k_VectorIndex) return false; 944 return VectorIndex.Val < 4; 945 } 946 bool isVectorIndex32() const { 947 if (Kind != k_VectorIndex) return false; 948 return VectorIndex.Val < 2; 949 } 950 951 bool isNEONi8splat() const { 952 if (Kind != k_Immediate) 953 return false; 954 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 955 // Must be a constant. 956 if (!CE) return false; 957 int64_t Value = CE->getValue(); 958 // i8 value splatted across 8 bytes. The immediate is just the 8 byte 959 // value. 960 return Value >= 0 && Value < 256; 961 } 962 963 bool isNEONi16splat() const { 964 if (Kind != k_Immediate) 965 return false; 966 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 967 // Must be a constant. 968 if (!CE) return false; 969 int64_t Value = CE->getValue(); 970 // i16 value in the range [0,255] or [0x0100, 0xff00] 971 return (Value >= 0 && Value < 256) || (Value >= 0x0100 && Value <= 0xff00); 972 } 973 974 bool isNEONi32splat() const { 975 if (Kind != k_Immediate) 976 return false; 977 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 978 // Must be a constant. 979 if (!CE) return false; 980 int64_t Value = CE->getValue(); 981 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X. 982 return (Value >= 0 && Value < 256) || 983 (Value >= 0x0100 && Value <= 0xff00) || 984 (Value >= 0x010000 && Value <= 0xff0000) || 985 (Value >= 0x01000000 && Value <= 0xff000000); 986 } 987 988 bool isNEONi32vmov() const { 989 if (Kind != k_Immediate) 990 return false; 991 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 992 // Must be a constant. 
993 if (!CE) return false; 994 int64_t Value = CE->getValue(); 995 // i32 value with set bits only in one byte X000, 0X00, 00X0, or 000X, 996 // for VMOV/VMVN only, 00Xf or 0Xff are also accepted. 997 return (Value >= 0 && Value < 256) || 998 (Value >= 0x0100 && Value <= 0xff00) || 999 (Value >= 0x010000 && Value <= 0xff0000) || 1000 (Value >= 0x01000000 && Value <= 0xff000000) || 1001 (Value >= 0x01ff && Value <= 0xffff && (Value & 0xff) == 0xff) || 1002 (Value >= 0x01ffff && Value <= 0xffffff && (Value & 0xffff) == 0xffff); 1003 } 1004 1005 bool isNEONi64splat() const { 1006 if (Kind != k_Immediate) 1007 return false; 1008 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1009 // Must be a constant. 1010 if (!CE) return false; 1011 uint64_t Value = CE->getValue(); 1012 // i64 value with each byte being either 0 or 0xff. 1013 for (unsigned i = 0; i < 8; ++i) 1014 if ((Value & 0xff) != 0 && (Value & 0xff) != 0xff) return false; 1015 return true; 1016 } 1017 1018 void addExpr(MCInst &Inst, const MCExpr *Expr) const { 1019 // Add as immediates when possible. Null MCExpr = 0. 1020 if (Expr == 0) 1021 Inst.addOperand(MCOperand::CreateImm(0)); 1022 else if (const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr)) 1023 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1024 else 1025 Inst.addOperand(MCOperand::CreateExpr(Expr)); 1026 } 1027 1028 void addCondCodeOperands(MCInst &Inst, unsigned N) const { 1029 assert(N == 2 && "Invalid number of operands!"); 1030 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1031 unsigned RegNum = getCondCode() == ARMCC::AL ? 
0: ARM::CPSR; 1032 Inst.addOperand(MCOperand::CreateReg(RegNum)); 1033 } 1034 1035 void addCoprocNumOperands(MCInst &Inst, unsigned N) const { 1036 assert(N == 1 && "Invalid number of operands!"); 1037 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1038 } 1039 1040 void addCoprocRegOperands(MCInst &Inst, unsigned N) const { 1041 assert(N == 1 && "Invalid number of operands!"); 1042 Inst.addOperand(MCOperand::CreateImm(getCoproc())); 1043 } 1044 1045 void addCoprocOptionOperands(MCInst &Inst, unsigned N) const { 1046 assert(N == 1 && "Invalid number of operands!"); 1047 Inst.addOperand(MCOperand::CreateImm(CoprocOption.Val)); 1048 } 1049 1050 void addITMaskOperands(MCInst &Inst, unsigned N) const { 1051 assert(N == 1 && "Invalid number of operands!"); 1052 Inst.addOperand(MCOperand::CreateImm(ITMask.Mask)); 1053 } 1054 1055 void addITCondCodeOperands(MCInst &Inst, unsigned N) const { 1056 assert(N == 1 && "Invalid number of operands!"); 1057 Inst.addOperand(MCOperand::CreateImm(unsigned(getCondCode()))); 1058 } 1059 1060 void addCCOutOperands(MCInst &Inst, unsigned N) const { 1061 assert(N == 1 && "Invalid number of operands!"); 1062 Inst.addOperand(MCOperand::CreateReg(getReg())); 1063 } 1064 1065 void addRegOperands(MCInst &Inst, unsigned N) const { 1066 assert(N == 1 && "Invalid number of operands!"); 1067 Inst.addOperand(MCOperand::CreateReg(getReg())); 1068 } 1069 1070 void addRegShiftedRegOperands(MCInst &Inst, unsigned N) const { 1071 assert(N == 3 && "Invalid number of operands!"); 1072 assert(isRegShiftedReg() && "addRegShiftedRegOperands() on non RegShiftedReg!"); 1073 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.SrcReg)); 1074 Inst.addOperand(MCOperand::CreateReg(RegShiftedReg.ShiftReg)); 1075 Inst.addOperand(MCOperand::CreateImm( 1076 ARM_AM::getSORegOpc(RegShiftedReg.ShiftTy, RegShiftedReg.ShiftImm))); 1077 } 1078 1079 void addRegShiftedImmOperands(MCInst &Inst, unsigned N) const { 1080 assert(N == 2 && "Invalid number of operands!"); 1081 
assert(isRegShiftedImm() && "addRegShiftedImmOperands() on non RegShiftedImm!"); 1082 Inst.addOperand(MCOperand::CreateReg(RegShiftedImm.SrcReg)); 1083 Inst.addOperand(MCOperand::CreateImm( 1084 ARM_AM::getSORegOpc(RegShiftedImm.ShiftTy, RegShiftedImm.ShiftImm))); 1085 } 1086 1087 void addShifterImmOperands(MCInst &Inst, unsigned N) const { 1088 assert(N == 1 && "Invalid number of operands!"); 1089 Inst.addOperand(MCOperand::CreateImm((ShifterImm.isASR << 5) | 1090 ShifterImm.Imm)); 1091 } 1092 1093 void addRegListOperands(MCInst &Inst, unsigned N) const { 1094 assert(N == 1 && "Invalid number of operands!"); 1095 const SmallVectorImpl<unsigned> &RegList = getRegList(); 1096 for (SmallVectorImpl<unsigned>::const_iterator 1097 I = RegList.begin(), E = RegList.end(); I != E; ++I) 1098 Inst.addOperand(MCOperand::CreateReg(*I)); 1099 } 1100 1101 void addDPRRegListOperands(MCInst &Inst, unsigned N) const { 1102 addRegListOperands(Inst, N); 1103 } 1104 1105 void addSPRRegListOperands(MCInst &Inst, unsigned N) const { 1106 addRegListOperands(Inst, N); 1107 } 1108 1109 void addRotImmOperands(MCInst &Inst, unsigned N) const { 1110 assert(N == 1 && "Invalid number of operands!"); 1111 // Encoded as val>>3. The printer handles display as 8, 16, 24. 1112 Inst.addOperand(MCOperand::CreateImm(RotImm.Imm >> 3)); 1113 } 1114 1115 void addBitfieldOperands(MCInst &Inst, unsigned N) const { 1116 assert(N == 1 && "Invalid number of operands!"); 1117 // Munge the lsb/width into a bitfield mask. 1118 unsigned lsb = Bitfield.LSB; 1119 unsigned width = Bitfield.Width; 1120 // Make a 32-bit mask w/ the referenced bits clear and all other bits set. 
1121 uint32_t Mask = ~(((uint32_t)0xffffffff >> lsb) << (32 - width) >> 1122 (32 - (lsb + width))); 1123 Inst.addOperand(MCOperand::CreateImm(Mask)); 1124 } 1125 1126 void addImmOperands(MCInst &Inst, unsigned N) const { 1127 assert(N == 1 && "Invalid number of operands!"); 1128 addExpr(Inst, getImm()); 1129 } 1130 1131 void addFPImmOperands(MCInst &Inst, unsigned N) const { 1132 assert(N == 1 && "Invalid number of operands!"); 1133 Inst.addOperand(MCOperand::CreateImm(getFPImm())); 1134 } 1135 1136 void addImm8s4Operands(MCInst &Inst, unsigned N) const { 1137 assert(N == 1 && "Invalid number of operands!"); 1138 // FIXME: We really want to scale the value here, but the LDRD/STRD 1139 // instruction don't encode operands that way yet. 1140 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1141 Inst.addOperand(MCOperand::CreateImm(CE->getValue())); 1142 } 1143 1144 void addImm0_1020s4Operands(MCInst &Inst, unsigned N) const { 1145 assert(N == 1 && "Invalid number of operands!"); 1146 // The immediate is scaled by four in the encoding and is stored 1147 // in the MCInst as such. Lop off the low two bits here. 1148 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1149 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1150 } 1151 1152 void addImm0_508s4Operands(MCInst &Inst, unsigned N) const { 1153 assert(N == 1 && "Invalid number of operands!"); 1154 // The immediate is scaled by four in the encoding and is stored 1155 // in the MCInst as such. Lop off the low two bits here. 
1156 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1157 Inst.addOperand(MCOperand::CreateImm(CE->getValue() / 4)); 1158 } 1159 1160 void addImm0_255Operands(MCInst &Inst, unsigned N) const { 1161 assert(N == 1 && "Invalid number of operands!"); 1162 addExpr(Inst, getImm()); 1163 } 1164 1165 void addImm0_7Operands(MCInst &Inst, unsigned N) const { 1166 assert(N == 1 && "Invalid number of operands!"); 1167 addExpr(Inst, getImm()); 1168 } 1169 1170 void addImm0_15Operands(MCInst &Inst, unsigned N) const { 1171 assert(N == 1 && "Invalid number of operands!"); 1172 addExpr(Inst, getImm()); 1173 } 1174 1175 void addImm0_31Operands(MCInst &Inst, unsigned N) const { 1176 assert(N == 1 && "Invalid number of operands!"); 1177 addExpr(Inst, getImm()); 1178 } 1179 1180 void addImm1_16Operands(MCInst &Inst, unsigned N) const { 1181 assert(N == 1 && "Invalid number of operands!"); 1182 // The constant encodes as the immediate-1, and we store in the instruction 1183 // the bits as encoded, so subtract off one here. 1184 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1185 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1186 } 1187 1188 void addImm1_32Operands(MCInst &Inst, unsigned N) const { 1189 assert(N == 1 && "Invalid number of operands!"); 1190 // The constant encodes as the immediate-1, and we store in the instruction 1191 // the bits as encoded, so subtract off one here. 
1192 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1193 Inst.addOperand(MCOperand::CreateImm(CE->getValue() - 1)); 1194 } 1195 1196 void addImm0_65535Operands(MCInst &Inst, unsigned N) const { 1197 assert(N == 1 && "Invalid number of operands!"); 1198 addExpr(Inst, getImm()); 1199 } 1200 1201 void addImm0_65535ExprOperands(MCInst &Inst, unsigned N) const { 1202 assert(N == 1 && "Invalid number of operands!"); 1203 addExpr(Inst, getImm()); 1204 } 1205 1206 void addImm24bitOperands(MCInst &Inst, unsigned N) const { 1207 assert(N == 1 && "Invalid number of operands!"); 1208 addExpr(Inst, getImm()); 1209 } 1210 1211 void addImmThumbSROperands(MCInst &Inst, unsigned N) const { 1212 assert(N == 1 && "Invalid number of operands!"); 1213 // The constant encodes as the immediate, except for 32, which encodes as 1214 // zero. 1215 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1216 unsigned Imm = CE->getValue(); 1217 Inst.addOperand(MCOperand::CreateImm((Imm == 32 ? 0 : Imm))); 1218 } 1219 1220 void addPKHLSLImmOperands(MCInst &Inst, unsigned N) const { 1221 assert(N == 1 && "Invalid number of operands!"); 1222 addExpr(Inst, getImm()); 1223 } 1224 1225 void addPKHASRImmOperands(MCInst &Inst, unsigned N) const { 1226 assert(N == 1 && "Invalid number of operands!"); 1227 // An ASR value of 32 encodes as 0, so that's how we want to add it to 1228 // the instruction as well. 1229 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1230 int Val = CE->getValue(); 1231 Inst.addOperand(MCOperand::CreateImm(Val == 32 ? 
0 : Val)); 1232 } 1233 1234 void addARMSOImmOperands(MCInst &Inst, unsigned N) const { 1235 assert(N == 1 && "Invalid number of operands!"); 1236 addExpr(Inst, getImm()); 1237 } 1238 1239 void addT2SOImmOperands(MCInst &Inst, unsigned N) const { 1240 assert(N == 1 && "Invalid number of operands!"); 1241 addExpr(Inst, getImm()); 1242 } 1243 1244 void addSetEndImmOperands(MCInst &Inst, unsigned N) const { 1245 assert(N == 1 && "Invalid number of operands!"); 1246 addExpr(Inst, getImm()); 1247 } 1248 1249 void addMemBarrierOptOperands(MCInst &Inst, unsigned N) const { 1250 assert(N == 1 && "Invalid number of operands!"); 1251 Inst.addOperand(MCOperand::CreateImm(unsigned(getMemBarrierOpt()))); 1252 } 1253 1254 void addMemNoOffsetOperands(MCInst &Inst, unsigned N) const { 1255 assert(N == 1 && "Invalid number of operands!"); 1256 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1257 } 1258 1259 void addAlignedMemoryOperands(MCInst &Inst, unsigned N) const { 1260 assert(N == 2 && "Invalid number of operands!"); 1261 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1262 Inst.addOperand(MCOperand::CreateImm(Memory.Alignment)); 1263 } 1264 1265 void addAddrMode2Operands(MCInst &Inst, unsigned N) const { 1266 assert(N == 3 && "Invalid number of operands!"); 1267 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1268 if (!Memory.OffsetRegNum) { 1269 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1270 // Special case for #-0 1271 if (Val == INT32_MIN) Val = 0; 1272 if (Val < 0) Val = -Val; 1273 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1274 } else { 1275 // For register offset, we encode the shift type and negation flag 1276 // here. 1277 Val = ARM_AM::getAM2Opc(Memory.isNegative ? 
ARM_AM::sub : ARM_AM::add, 1278 Memory.ShiftImm, Memory.ShiftType); 1279 } 1280 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1281 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1282 Inst.addOperand(MCOperand::CreateImm(Val)); 1283 } 1284 1285 void addAM2OffsetImmOperands(MCInst &Inst, unsigned N) const { 1286 assert(N == 2 && "Invalid number of operands!"); 1287 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1288 assert(CE && "non-constant AM2OffsetImm operand!"); 1289 int32_t Val = CE->getValue(); 1290 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1291 // Special case for #-0 1292 if (Val == INT32_MIN) Val = 0; 1293 if (Val < 0) Val = -Val; 1294 Val = ARM_AM::getAM2Opc(AddSub, Val, ARM_AM::no_shift); 1295 Inst.addOperand(MCOperand::CreateReg(0)); 1296 Inst.addOperand(MCOperand::CreateImm(Val)); 1297 } 1298 1299 void addAddrMode3Operands(MCInst &Inst, unsigned N) const { 1300 assert(N == 3 && "Invalid number of operands!"); 1301 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1302 if (!Memory.OffsetRegNum) { 1303 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1304 // Special case for #-0 1305 if (Val == INT32_MIN) Val = 0; 1306 if (Val < 0) Val = -Val; 1307 Val = ARM_AM::getAM3Opc(AddSub, Val); 1308 } else { 1309 // For register offset, we encode the shift type and negation flag 1310 // here. 1311 Val = ARM_AM::getAM3Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 0); 1312 } 1313 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1314 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1315 Inst.addOperand(MCOperand::CreateImm(Val)); 1316 } 1317 1318 void addAM3OffsetOperands(MCInst &Inst, unsigned N) const { 1319 assert(N == 2 && "Invalid number of operands!"); 1320 if (Kind == k_PostIndexRegister) { 1321 int32_t Val = 1322 ARM_AM::getAM3Opc(PostIdxReg.isAdd ? 
ARM_AM::add : ARM_AM::sub, 0); 1323 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1324 Inst.addOperand(MCOperand::CreateImm(Val)); 1325 return; 1326 } 1327 1328 // Constant offset. 1329 const MCConstantExpr *CE = static_cast<const MCConstantExpr*>(getImm()); 1330 int32_t Val = CE->getValue(); 1331 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1332 // Special case for #-0 1333 if (Val == INT32_MIN) Val = 0; 1334 if (Val < 0) Val = -Val; 1335 Val = ARM_AM::getAM3Opc(AddSub, Val); 1336 Inst.addOperand(MCOperand::CreateReg(0)); 1337 Inst.addOperand(MCOperand::CreateImm(Val)); 1338 } 1339 1340 void addAddrMode5Operands(MCInst &Inst, unsigned N) const { 1341 assert(N == 2 && "Invalid number of operands!"); 1342 // The lower two bits are always zero and as such are not encoded. 1343 int32_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() / 4 : 0; 1344 ARM_AM::AddrOpc AddSub = Val < 0 ? ARM_AM::sub : ARM_AM::add; 1345 // Special case for #-0 1346 if (Val == INT32_MIN) Val = 0; 1347 if (Val < 0) Val = -Val; 1348 Val = ARM_AM::getAM5Opc(AddSub, Val); 1349 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1350 Inst.addOperand(MCOperand::CreateImm(Val)); 1351 } 1352 1353 void addMemImm8s4OffsetOperands(MCInst &Inst, unsigned N) const { 1354 assert(N == 2 && "Invalid number of operands!"); 1355 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1356 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1357 Inst.addOperand(MCOperand::CreateImm(Val)); 1358 } 1359 1360 void addMemImm0_1020s4OffsetOperands(MCInst &Inst, unsigned N) const { 1361 assert(N == 2 && "Invalid number of operands!"); 1362 // The lower two bits are always zero and as such are not encoded. 1363 int32_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() / 4 : 0; 1364 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1365 Inst.addOperand(MCOperand::CreateImm(Val)); 1366 } 1367 1368 void addMemImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1369 assert(N == 2 && "Invalid number of operands!"); 1370 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1371 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1372 Inst.addOperand(MCOperand::CreateImm(Val)); 1373 } 1374 1375 void addMemPosImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1376 addMemImm8OffsetOperands(Inst, N); 1377 } 1378 1379 void addMemNegImm8OffsetOperands(MCInst &Inst, unsigned N) const { 1380 addMemImm8OffsetOperands(Inst, N); 1381 } 1382 1383 void addMemUImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1384 assert(N == 2 && "Invalid number of operands!"); 1385 // If this is an immediate, it's a label reference. 1386 if (Kind == k_Immediate) { 1387 addExpr(Inst, getImm()); 1388 Inst.addOperand(MCOperand::CreateImm(0)); 1389 return; 1390 } 1391 1392 // Otherwise, it's a normal memory reg+offset. 1393 int64_t Val = Memory.OffsetImm ? Memory.OffsetImm->getValue() : 0; 1394 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1395 Inst.addOperand(MCOperand::CreateImm(Val)); 1396 } 1397 1398 void addMemImm12OffsetOperands(MCInst &Inst, unsigned N) const { 1399 assert(N == 2 && "Invalid number of operands!"); 1400 // If this is an immediate, it's a label reference. 1401 if (Kind == k_Immediate) { 1402 addExpr(Inst, getImm()); 1403 Inst.addOperand(MCOperand::CreateImm(0)); 1404 return; 1405 } 1406 1407 // Otherwise, it's a normal memory reg+offset. 1408 int64_t Val = Memory.OffsetImm ? 
Memory.OffsetImm->getValue() : 0; 1409 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1410 Inst.addOperand(MCOperand::CreateImm(Val)); 1411 } 1412 1413 void addMemTBBOperands(MCInst &Inst, unsigned N) const { 1414 assert(N == 2 && "Invalid number of operands!"); 1415 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1416 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1417 } 1418 1419 void addMemTBHOperands(MCInst &Inst, unsigned N) const { 1420 assert(N == 2 && "Invalid number of operands!"); 1421 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1422 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1423 } 1424 1425 void addMemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1426 assert(N == 3 && "Invalid number of operands!"); 1427 unsigned Val = ARM_AM::getAM2Opc(Memory.isNegative ? ARM_AM::sub : ARM_AM::add, 1428 Memory.ShiftImm, Memory.ShiftType); 1429 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1430 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1431 Inst.addOperand(MCOperand::CreateImm(Val)); 1432 } 1433 1434 void addT2MemRegOffsetOperands(MCInst &Inst, unsigned N) const { 1435 assert(N == 3 && "Invalid number of operands!"); 1436 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1437 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1438 Inst.addOperand(MCOperand::CreateImm(Memory.ShiftImm)); 1439 } 1440 1441 void addMemThumbRROperands(MCInst &Inst, unsigned N) const { 1442 assert(N == 2 && "Invalid number of operands!"); 1443 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1444 Inst.addOperand(MCOperand::CreateReg(Memory.OffsetRegNum)); 1445 } 1446 1447 void addMemThumbRIs4Operands(MCInst &Inst, unsigned N) const { 1448 assert(N == 2 && "Invalid number of operands!"); 1449 int64_t Val = Memory.OffsetImm ? 
(Memory.OffsetImm->getValue() / 4) : 0; 1450 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1451 Inst.addOperand(MCOperand::CreateImm(Val)); 1452 } 1453 1454 void addMemThumbRIs2Operands(MCInst &Inst, unsigned N) const { 1455 assert(N == 2 && "Invalid number of operands!"); 1456 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 2) : 0; 1457 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1458 Inst.addOperand(MCOperand::CreateImm(Val)); 1459 } 1460 1461 void addMemThumbRIs1Operands(MCInst &Inst, unsigned N) const { 1462 assert(N == 2 && "Invalid number of operands!"); 1463 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue()) : 0; 1464 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1465 Inst.addOperand(MCOperand::CreateImm(Val)); 1466 } 1467 1468 void addMemThumbSPIOperands(MCInst &Inst, unsigned N) const { 1469 assert(N == 2 && "Invalid number of operands!"); 1470 int64_t Val = Memory.OffsetImm ? (Memory.OffsetImm->getValue() / 4) : 0; 1471 Inst.addOperand(MCOperand::CreateReg(Memory.BaseRegNum)); 1472 Inst.addOperand(MCOperand::CreateImm(Val)); 1473 } 1474 1475 void addPostIdxImm8Operands(MCInst &Inst, unsigned N) const { 1476 assert(N == 1 && "Invalid number of operands!"); 1477 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1478 assert(CE && "non-constant post-idx-imm8 operand!"); 1479 int Imm = CE->getValue(); 1480 bool isAdd = Imm >= 0; 1481 if (Imm == INT32_MIN) Imm = 0; 1482 Imm = (Imm < 0 ? -Imm : Imm) | (int)isAdd << 8; 1483 Inst.addOperand(MCOperand::CreateImm(Imm)); 1484 } 1485 1486 void addPostIdxImm8s4Operands(MCInst &Inst, unsigned N) const { 1487 assert(N == 1 && "Invalid number of operands!"); 1488 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1489 assert(CE && "non-constant post-idx-imm8s4 operand!"); 1490 int Imm = CE->getValue(); 1491 bool isAdd = Imm >= 0; 1492 if (Imm == INT32_MIN) Imm = 0; 1493 // Immediate is scaled by 4. 1494 Imm = ((Imm < 0 ? 
-Imm : Imm) / 4) | (int)isAdd << 8; 1495 Inst.addOperand(MCOperand::CreateImm(Imm)); 1496 } 1497 1498 void addPostIdxRegOperands(MCInst &Inst, unsigned N) const { 1499 assert(N == 2 && "Invalid number of operands!"); 1500 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1501 Inst.addOperand(MCOperand::CreateImm(PostIdxReg.isAdd)); 1502 } 1503 1504 void addPostIdxRegShiftedOperands(MCInst &Inst, unsigned N) const { 1505 assert(N == 2 && "Invalid number of operands!"); 1506 Inst.addOperand(MCOperand::CreateReg(PostIdxReg.RegNum)); 1507 // The sign, shift type, and shift amount are encoded in a single operand 1508 // using the AM2 encoding helpers. 1509 ARM_AM::AddrOpc opc = PostIdxReg.isAdd ? ARM_AM::add : ARM_AM::sub; 1510 unsigned Imm = ARM_AM::getAM2Opc(opc, PostIdxReg.ShiftImm, 1511 PostIdxReg.ShiftTy); 1512 Inst.addOperand(MCOperand::CreateImm(Imm)); 1513 } 1514 1515 void addMSRMaskOperands(MCInst &Inst, unsigned N) const { 1516 assert(N == 1 && "Invalid number of operands!"); 1517 Inst.addOperand(MCOperand::CreateImm(unsigned(getMSRMask()))); 1518 } 1519 1520 void addProcIFlagsOperands(MCInst &Inst, unsigned N) const { 1521 assert(N == 1 && "Invalid number of operands!"); 1522 Inst.addOperand(MCOperand::CreateImm(unsigned(getProcIFlags()))); 1523 } 1524 1525 void addVecListOneDOperands(MCInst &Inst, unsigned N) const { 1526 assert(N == 1 && "Invalid number of operands!"); 1527 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1528 } 1529 1530 void addVecListTwoDOperands(MCInst &Inst, unsigned N) const { 1531 assert(N == 1 && "Invalid number of operands!"); 1532 // Only the first register actually goes on the instruction. The rest 1533 // are implied by the opcode. 1534 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1535 } 1536 1537 void addVecListThreeDOperands(MCInst &Inst, unsigned N) const { 1538 assert(N == 1 && "Invalid number of operands!"); 1539 // Only the first register actually goes on the instruction. 
The rest 1540 // are implied by the opcode. 1541 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1542 } 1543 1544 void addVecListFourDOperands(MCInst &Inst, unsigned N) const { 1545 assert(N == 1 && "Invalid number of operands!"); 1546 // Only the first register actually goes on the instruction. The rest 1547 // are implied by the opcode. 1548 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1549 } 1550 1551 void addVecListTwoQOperands(MCInst &Inst, unsigned N) const { 1552 assert(N == 1 && "Invalid number of operands!"); 1553 // Only the first register actually goes on the instruction. The rest 1554 // are implied by the opcode. 1555 Inst.addOperand(MCOperand::CreateReg(VectorList.RegNum)); 1556 } 1557 1558 void addVectorIndex8Operands(MCInst &Inst, unsigned N) const { 1559 assert(N == 1 && "Invalid number of operands!"); 1560 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1561 } 1562 1563 void addVectorIndex16Operands(MCInst &Inst, unsigned N) const { 1564 assert(N == 1 && "Invalid number of operands!"); 1565 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1566 } 1567 1568 void addVectorIndex32Operands(MCInst &Inst, unsigned N) const { 1569 assert(N == 1 && "Invalid number of operands!"); 1570 Inst.addOperand(MCOperand::CreateImm(getVectorIndex())); 1571 } 1572 1573 void addNEONi8splatOperands(MCInst &Inst, unsigned N) const { 1574 assert(N == 1 && "Invalid number of operands!"); 1575 // The immediate encodes the type of constant as well as the value. 1576 // Mask in that this is an i8 splat. 1577 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1578 Inst.addOperand(MCOperand::CreateImm(CE->getValue() | 0xe00)); 1579 } 1580 1581 void addNEONi16splatOperands(MCInst &Inst, unsigned N) const { 1582 assert(N == 1 && "Invalid number of operands!"); 1583 // The immediate encodes the type of constant as well as the value. 
1584 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1585 unsigned Value = CE->getValue(); 1586 if (Value >= 256) 1587 Value = (Value >> 8) | 0xa00; 1588 else 1589 Value |= 0x800; 1590 Inst.addOperand(MCOperand::CreateImm(Value)); 1591 } 1592 1593 void addNEONi32splatOperands(MCInst &Inst, unsigned N) const { 1594 assert(N == 1 && "Invalid number of operands!"); 1595 // The immediate encodes the type of constant as well as the value. 1596 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1597 unsigned Value = CE->getValue(); 1598 if (Value >= 256 && Value <= 0xff00) 1599 Value = (Value >> 8) | 0x200; 1600 else if (Value > 0xffff && Value <= 0xff0000) 1601 Value = (Value >> 16) | 0x400; 1602 else if (Value > 0xffffff) 1603 Value = (Value >> 24) | 0x600; 1604 Inst.addOperand(MCOperand::CreateImm(Value)); 1605 } 1606 1607 void addNEONi32vmovOperands(MCInst &Inst, unsigned N) const { 1608 assert(N == 1 && "Invalid number of operands!"); 1609 // The immediate encodes the type of constant as well as the value. 1610 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1611 unsigned Value = CE->getValue(); 1612 if (Value >= 256 && Value <= 0xffff) 1613 Value = (Value >> 8) | ((Value & 0xff) ? 0xc00 : 0x200); 1614 else if (Value > 0xffff && Value <= 0xffffff) 1615 Value = (Value >> 16) | ((Value & 0xff) ? 0xd00 : 0x400); 1616 else if (Value > 0xffffff) 1617 Value = (Value >> 24) | 0x600; 1618 Inst.addOperand(MCOperand::CreateImm(Value)); 1619 } 1620 1621 void addNEONi64splatOperands(MCInst &Inst, unsigned N) const { 1622 assert(N == 1 && "Invalid number of operands!"); 1623 // The immediate encodes the type of constant as well as the value. 
1624 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(getImm()); 1625 uint64_t Value = CE->getValue(); 1626 unsigned Imm = 0; 1627 for (unsigned i = 0; i < 8; ++i, Value >>= 8) { 1628 Imm |= (Value & 1) << i; 1629 } 1630 Inst.addOperand(MCOperand::CreateImm(Imm | 0x1e00)); 1631 } 1632 1633 virtual void print(raw_ostream &OS) const; 1634 1635 static ARMOperand *CreateITMask(unsigned Mask, SMLoc S) { 1636 ARMOperand *Op = new ARMOperand(k_ITCondMask); 1637 Op->ITMask.Mask = Mask; 1638 Op->StartLoc = S; 1639 Op->EndLoc = S; 1640 return Op; 1641 } 1642 1643 static ARMOperand *CreateCondCode(ARMCC::CondCodes CC, SMLoc S) { 1644 ARMOperand *Op = new ARMOperand(k_CondCode); 1645 Op->CC.Val = CC; 1646 Op->StartLoc = S; 1647 Op->EndLoc = S; 1648 return Op; 1649 } 1650 1651 static ARMOperand *CreateCoprocNum(unsigned CopVal, SMLoc S) { 1652 ARMOperand *Op = new ARMOperand(k_CoprocNum); 1653 Op->Cop.Val = CopVal; 1654 Op->StartLoc = S; 1655 Op->EndLoc = S; 1656 return Op; 1657 } 1658 1659 static ARMOperand *CreateCoprocReg(unsigned CopVal, SMLoc S) { 1660 ARMOperand *Op = new ARMOperand(k_CoprocReg); 1661 Op->Cop.Val = CopVal; 1662 Op->StartLoc = S; 1663 Op->EndLoc = S; 1664 return Op; 1665 } 1666 1667 static ARMOperand *CreateCoprocOption(unsigned Val, SMLoc S, SMLoc E) { 1668 ARMOperand *Op = new ARMOperand(k_CoprocOption); 1669 Op->Cop.Val = Val; 1670 Op->StartLoc = S; 1671 Op->EndLoc = E; 1672 return Op; 1673 } 1674 1675 static ARMOperand *CreateCCOut(unsigned RegNum, SMLoc S) { 1676 ARMOperand *Op = new ARMOperand(k_CCOut); 1677 Op->Reg.RegNum = RegNum; 1678 Op->StartLoc = S; 1679 Op->EndLoc = S; 1680 return Op; 1681 } 1682 1683 static ARMOperand *CreateToken(StringRef Str, SMLoc S) { 1684 ARMOperand *Op = new ARMOperand(k_Token); 1685 Op->Tok.Data = Str.data(); 1686 Op->Tok.Length = Str.size(); 1687 Op->StartLoc = S; 1688 Op->EndLoc = S; 1689 return Op; 1690 } 1691 1692 static ARMOperand *CreateReg(unsigned RegNum, SMLoc S, SMLoc E) { 1693 ARMOperand *Op = new 
ARMOperand(k_Register);
    Op->Reg.RegNum = RegNum;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create an operand for a register shifted by a second register,
  /// e.g. "r0, lsl r1".
  static ARMOperand *CreateShiftedRegister(ARM_AM::ShiftOpc ShTy,
                                           unsigned SrcReg,
                                           unsigned ShiftReg,
                                           unsigned ShiftImm,
                                           SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedRegister);
    Op->RegShiftedReg.ShiftTy = ShTy;
    Op->RegShiftedReg.SrcReg = SrcReg;
    Op->RegShiftedReg.ShiftReg = ShiftReg;
    Op->RegShiftedReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create an operand for a register shifted by an immediate,
  /// e.g. "r0, lsl #2".
  static ARMOperand *CreateShiftedImmediate(ARM_AM::ShiftOpc ShTy,
                                            unsigned SrcReg,
                                            unsigned ShiftImm,
                                            SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShiftedImmediate);
    Op->RegShiftedImm.ShiftTy = ShTy;
    Op->RegShiftedImm.SrcReg = SrcReg;
    Op->RegShiftedImm.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create the shifter-immediate operand ("lsl #n" / "asr #n") used by
  /// saturating instructions.
  static ARMOperand *CreateShifterImm(bool isASR, unsigned Imm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_ShifterImmediate);
    Op->ShifterImm.isASR = isASR;
    Op->ShifterImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a rotate-immediate operand ("ror #n").
  static ARMOperand *CreateRotImm(unsigned Imm, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_RotateImmediate);
    Op->RotImm.Imm = Imm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a bitfield descriptor operand (LSB + width).
  static ARMOperand *CreateBitfield(unsigned LSB, unsigned Width,
                                    SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_BitfieldDescriptor);
    Op->Bitfield.LSB = LSB;
    Op->Bitfield.Width = Width;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a register-list operand. The list kind (GPR, DPR or SPR list)
  /// is chosen from the register class of the first register in Regs, and
  /// the registers are stored sorted by their enum value.
  static ARMOperand *
  CreateRegList(const SmallVectorImpl<std::pair<unsigned, SMLoc> > &Regs,
                SMLoc StartLoc, SMLoc EndLoc) {
    KindTy Kind = k_RegisterList;

    if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Regs.front().first))
      Kind = k_DPRRegisterList;
    else if (ARMMCRegisterClasses[ARM::SPRRegClassID].
             contains(Regs.front().first))
      Kind = k_SPRRegisterList;

    ARMOperand *Op = new ARMOperand(Kind);
    for (SmallVectorImpl<std::pair<unsigned, SMLoc> >::const_iterator
           I = Regs.begin(), E = Regs.end(); I != E; ++I)
      Op->Registers.push_back(I->first);
    // Keep the stored list sorted by register enum value.
    array_pod_sort(Op->Registers.begin(), Op->Registers.end());
    Op->StartLoc = StartLoc;
    Op->EndLoc = EndLoc;
    return Op;
  }

  /// Create a vector-list operand: Count consecutive registers starting
  /// at RegNum.
  static ARMOperand *CreateVectorList(unsigned RegNum, unsigned Count,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_VectorList);
    Op->VectorList.RegNum = RegNum;
    Op->VectorList.Count = Count;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a vector-index operand ("[n]").
  /// NOTE: the Ctx parameter is currently unused.
  static ARMOperand *CreateVectorIndex(unsigned Idx, SMLoc S, SMLoc E,
                                       MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_VectorIndex);
    Op->VectorIndex.Val = Idx;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a generic immediate operand from an expression.
  static ARMOperand *CreateImm(const MCExpr *Val, SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Immediate);
    Op->Imm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a floating-point immediate operand holding the encoded value.
  /// The operand spans a single location (EndLoc == StartLoc).
  /// NOTE: the Ctx parameter is currently unused.
  static ARMOperand *CreateFPImm(unsigned Val, SMLoc S, MCContext &Ctx) {
    ARMOperand *Op = new ARMOperand(k_FPImmediate);
    Op->FPImm.Val = Val;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a memory operand carrying the full set of addressing-mode
  /// fields: base register, optional constant offset, optional offset
  /// register with shift type/amount, alignment, and whether the offset
  /// is negated.
  static ARMOperand *CreateMem(unsigned BaseRegNum,
                               const MCConstantExpr *OffsetImm,
                               unsigned OffsetRegNum,
                               ARM_AM::ShiftOpc ShiftType,
                               unsigned ShiftImm,
                               unsigned Alignment,
                               bool isNegative,
                               SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_Memory);
    Op->Memory.BaseRegNum = BaseRegNum;
    Op->Memory.OffsetImm = OffsetImm;
    Op->Memory.OffsetRegNum = OffsetRegNum;
    Op->Memory.ShiftType = ShiftType;
    Op->Memory.ShiftImm = ShiftImm;
    Op->Memory.Alignment = Alignment;
    Op->Memory.isNegative = isNegative;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a post-indexed register operand (e.g. "[r0], r1, lsl #2").
  static ARMOperand *CreatePostIdxReg(unsigned RegNum, bool isAdd,
                                      ARM_AM::ShiftOpc ShiftTy,
                                      unsigned ShiftImm,
                                      SMLoc S, SMLoc E) {
    ARMOperand *Op = new ARMOperand(k_PostIndexRegister);
    Op->PostIdxReg.RegNum = RegNum;
    Op->PostIdxReg.isAdd = isAdd;
    Op->PostIdxReg.ShiftTy = ShiftTy;
    Op->PostIdxReg.ShiftImm = ShiftImm;
    Op->StartLoc = S;
    Op->EndLoc = E;
    return Op;
  }

  /// Create a DMB/DSB memory-barrier option operand.
  static ARMOperand *CreateMemBarrierOpt(ARM_MB::MemBOpt Opt, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MemBarrierOpt);
    Op->MBOpt.Val = Opt;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create a CPS interrupt-flags operand.
  static ARMOperand *CreateProcIFlags(ARM_PROC::IFlags IFlags, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_ProcIFlags);
    Op->IFlags.Val = IFlags;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }

  /// Create an MSR mask operand (special-register selector + flag bits).
  static ARMOperand *CreateMSRMask(unsigned MMask, SMLoc S) {
    ARMOperand *Op = new ARMOperand(k_MSRMask);
    Op->MMask.Val = MMask;
    Op->StartLoc = S;
    Op->EndLoc = S;
    return Op;
  }
};

} // end anonymous namespace.
/// Debug dump of this operand to OS. Each operand kind gets a
/// human-readable "<...>" rendering; used for -debug/diagnostic output only.
void ARMOperand::print(raw_ostream &OS) const {
  switch (Kind) {
  case k_FPImmediate:
    OS << "<fpimm " << getFPImm() << "(" << ARM_AM::getFPImmFloat(getFPImm())
       << ") >";
    break;
  case k_CondCode:
    OS << "<ARMCC::" << ARMCondCodeToString(getCondCode()) << ">";
    break;
  case k_CCOut:
    OS << "<ccout " << getReg() << ">";
    break;
  case k_ITCondMask: {
    // Render the 4-bit IT mask as its t/e suffix string; the table is
    // indexed directly by the mask value.
    static const char *MaskStr[] = {
      "()", "(t)", "(e)", "(tt)", "(et)", "(te)", "(ee)", "(ttt)", "(ett)",
      "(tet)", "(eet)", "(tte)", "(ete)", "(tee)", "(eee)"
    };
    assert((ITMask.Mask & 0xf) == ITMask.Mask);
    OS << "<it-mask " << MaskStr[ITMask.Mask] << ">";
    break;
  }
  case k_CoprocNum:
    OS << "<coprocessor number: " << getCoproc() << ">";
    break;
  case k_CoprocReg:
    OS << "<coprocessor register: " << getCoproc() << ">";
    break;
  case k_CoprocOption:
    OS << "<coprocessor option: " << CoprocOption.Val << ">";
    break;
  case k_MSRMask:
    OS << "<mask: " << getMSRMask() << ">";
    break;
  case k_Immediate:
    getImm()->print(OS);
    break;
  case k_MemBarrierOpt:
    OS << "<ARM_MB::" << MemBOptToString(getMemBarrierOpt()) << ">";
    break;
  case k_Memory:
    // Only the base register is printed; the other memory fields are
    // omitted from the dump.
    OS << "<memory "
       << " base:" << Memory.BaseRegNum;
    OS << ">";
    break;
  case k_PostIndexRegister:
    // NOTE(review): this case emits a closing '>' but no opening '<';
    // cosmetic asymmetry in debug output only.
    OS << "post-idx register " << (PostIdxReg.isAdd ? "" : "-")
       << PostIdxReg.RegNum;
    if (PostIdxReg.ShiftTy != ARM_AM::no_shift)
      OS << ARM_AM::getShiftOpcStr(PostIdxReg.ShiftTy) << " "
         << PostIdxReg.ShiftImm;
    OS << ">";
    break;
  case k_ProcIFlags: {
    OS << "<ARM_PROC::";
    unsigned IFlags = getProcIFlags();
    // Print the a/i/f flags from highest bit to lowest.
    for (int i=2; i >= 0; --i)
      if (IFlags & (1 << i))
        OS << ARM_PROC::IFlagsToString(1 << i);
    OS << ">";
    break;
  }
  case k_Register:
    OS << "<register " << getReg() << ">";
    break;
  case k_ShifterImmediate:
    OS << "<shift " << (ShifterImm.isASR ? "asr" : "lsl")
       << " #" << ShifterImm.Imm << ">";
    break;
  case k_ShiftedRegister:
    OS << "<so_reg_reg "
       << RegShiftedReg.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedReg.ShiftImm))
       << ", " << RegShiftedReg.ShiftReg << ", "
       << ARM_AM::getSORegOffset(RegShiftedReg.ShiftImm)
       << ">";
    break;
  case k_ShiftedImmediate:
    OS << "<so_reg_imm "
       << RegShiftedImm.SrcReg
       << ARM_AM::getShiftOpcStr(ARM_AM::getSORegShOp(RegShiftedImm.ShiftImm))
       << ", " << ARM_AM::getSORegOffset(RegShiftedImm.ShiftImm)
       << ">";
    break;
  case k_RotateImmediate:
    // RotImm.Imm stores the rotation divided by 8.
    OS << "<ror " << " #" << (RotImm.Imm * 8) << ">";
    break;
  case k_BitfieldDescriptor:
    OS << "<bitfield " << "lsb: " << Bitfield.LSB
       << ", width: " << Bitfield.Width << ">";
    break;
  case k_RegisterList:
  case k_DPRRegisterList:
  case k_SPRRegisterList: {
    OS << "<register_list ";

    const SmallVectorImpl<unsigned> &RegList = getRegList();
    for (SmallVectorImpl<unsigned>::const_iterator
           I = RegList.begin(), E = RegList.end(); I != E; ) {
      OS << *I;
      // Comma-separate all but the last element.
      if (++I < E) OS << ", ";
    }

    OS << ">";
    break;
  }
  case k_VectorList:
    OS << "<vector_list " << VectorList.Count << " * "
       << VectorList.RegNum << ">";
    break;
  case k_Token:
    OS << "'" << getToken() << "'";
    break;
  case k_VectorIndex:
    OS << "<vectorindex " << getVectorIndex() << ">";
    break;
  }
}

/// @name Auto-generated Match Functions
/// {

static unsigned MatchRegisterName(StringRef Name);

/// }

/// Target hook: parse a register and report success via the return value
/// (false == success, per the MCTargetAsmParser convention).
/// NOTE(review): the StartLoc/EndLoc out-parameters are never assigned
/// here — callers relying on them get default-constructed locations;
/// confirm whether they should be filled in from the token.
bool ARMAsmParser::ParseRegister(unsigned &RegNo,
                                 SMLoc &StartLoc, SMLoc &EndLoc) {
  RegNo = tryParseRegister();

  return (RegNo == (unsigned)-1);
}

/// Try to parse a register name. The token must be an Identifier when called,
/// and if it is a register name the token is eaten and the register number is
/// returned. Otherwise return -1.
///
int ARMAsmParser::tryParseRegister() {
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier)) return -1;

  // FIXME: Validate register for the current architecture; we have to do
  // validation later, so maybe there is no need for this here.
  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  unsigned RegNum = MatchRegisterName(lowerCase);
  if (!RegNum) {
    // Accept the architectural aliases the tablegen matcher doesn't know.
    RegNum = StringSwitch<unsigned>(lowerCase)
      .Case("r13", ARM::SP)
      .Case("r14", ARM::LR)
      .Case("r15", ARM::PC)
      .Case("ip", ARM::R12)
      .Default(0);
  }
  if (!RegNum) return -1;

  Parser.Lex(); // Eat identifier token.

  return RegNum;
}

// Try to parse a shifter  (e.g., "lsl <amt>"). On success, return 0.
// If a recoverable error occurs, return 1. If an irrecoverable error
// occurs, return -1. An irrecoverable error is one where tokens have been
// consumed in the process of trying to parse the shifter (i.e., when it is
// indeed a shifter operand, but malformed).
int ARMAsmParser::tryParseShiftRegister(
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");

  std::string upperCase = Tok.getString().str();
  std::string lowerCase = LowercaseString(upperCase);
  ARM_AM::ShiftOpc ShiftTy = StringSwitch<ARM_AM::ShiftOpc>(lowerCase)
      .Case("lsl", ARM_AM::lsl)
      .Case("lsr", ARM_AM::lsr)
      .Case("asr", ARM_AM::asr)
      .Case("ror", ARM_AM::ror)
      .Case("rrx", ARM_AM::rrx)
      .Default(ARM_AM::no_shift);

  // Not a shift mnemonic at all: recoverable, nothing consumed yet.
  if (ShiftTy == ARM_AM::no_shift)
    return 1;

  Parser.Lex(); // Eat the operator.

  // The source register for the shift has already been added to the
  // operand list, so we need to pop it off and combine it into the shifted
  // register operand instead.
  OwningPtr<ARMOperand> PrevOp((ARMOperand*)Operands.pop_back_val());
  if (!PrevOp->isReg())
    // NOTE(review): Error() returns true, so this path returns 1
    // ("recoverable") even though the shift token was already consumed and
    // the previous operand popped and destroyed — confirm whether this
    // should be -1 per the contract documented above.
    return Error(PrevOp->getStartLoc(), "shift must be of a register");
  int SrcReg = PrevOp->getReg();
  int64_t Imm = 0;
  int ShiftReg = 0;
  if (ShiftTy == ARM_AM::rrx) {
    // RRX Doesn't have an explicit shift amount. The encoder expects
    // the shift register to be the same as the source register. Seems odd,
    // but OK.
    ShiftReg = SrcReg;
  } else {
    // Figure out if this is shifted by a constant or a register (for non-RRX).
    if (Parser.getTok().is(AsmToken::Hash)) {
      Parser.Lex(); // Eat hash.
      SMLoc ImmLoc = Parser.getTok().getLoc();
      const MCExpr *ShiftExpr = 0;
      if (getParser().ParseExpression(ShiftExpr)) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // The expression must be evaluatable as an immediate.
      const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftExpr);
      if (!CE) {
        Error(ImmLoc, "invalid immediate shift value");
        return -1;
      }
      // Range check the immediate.
      // lsl, ror: 0 <= imm <= 31
      // lsr, asr: 0 <= imm <= 32
      Imm = CE->getValue();
      if (Imm < 0 ||
          ((ShiftTy == ARM_AM::lsl || ShiftTy == ARM_AM::ror) && Imm > 31) ||
          ((ShiftTy == ARM_AM::lsr || ShiftTy == ARM_AM::asr) && Imm > 32)) {
        Error(ImmLoc, "immediate shift value out of range");
        return -1;
      }
    } else if (Parser.getTok().is(AsmToken::Identifier)) {
      ShiftReg = tryParseRegister();
      SMLoc L = Parser.getTok().getLoc();
      if (ShiftReg == -1) {
        Error (L, "expected immediate or register in shift operand");
        return -1;
      }
    } else {
      Error (Parser.getTok().getLoc(),
             "expected immediate or register in shift operand");
      return -1;
    }
  }

  // RRX is represented as a shifted-immediate (amount 0) even though a
  // shift register was recorded above for the encoder.
  if (ShiftReg && ShiftTy != ARM_AM::rrx)
    Operands.push_back(ARMOperand::CreateShiftedRegister(ShiftTy, SrcReg,
                                                         ShiftReg, Imm,
                                               S, Parser.getTok().getLoc()));
  else
    Operands.push_back(ARMOperand::CreateShiftedImmediate(ShiftTy, SrcReg, Imm,
                                               S, Parser.getTok().getLoc()));

  return 0;
}


/// Try to parse a register name.  The token must be an Identifier when called.
/// If it's a register, an AsmOperand is created. Another AsmOperand is created
/// if there is a "writeback". 'true' if it's not a register.
///
/// TODO this is likely to change to allow different register types and or to
/// parse for a specific register type.
2132bool ARMAsmParser:: 2133tryParseRegisterWithWriteBack(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2134 SMLoc S = Parser.getTok().getLoc(); 2135 int RegNo = tryParseRegister(); 2136 if (RegNo == -1) 2137 return true; 2138 2139 Operands.push_back(ARMOperand::CreateReg(RegNo, S, Parser.getTok().getLoc())); 2140 2141 const AsmToken &ExclaimTok = Parser.getTok(); 2142 if (ExclaimTok.is(AsmToken::Exclaim)) { 2143 Operands.push_back(ARMOperand::CreateToken(ExclaimTok.getString(), 2144 ExclaimTok.getLoc())); 2145 Parser.Lex(); // Eat exclaim token 2146 return false; 2147 } 2148 2149 // Also check for an index operand. This is only legal for vector registers, 2150 // but that'll get caught OK in operand matching, so we don't need to 2151 // explicitly filter everything else out here. 2152 if (Parser.getTok().is(AsmToken::LBrac)) { 2153 SMLoc SIdx = Parser.getTok().getLoc(); 2154 Parser.Lex(); // Eat left bracket token. 2155 2156 const MCExpr *ImmVal; 2157 if (getParser().ParseExpression(ImmVal)) 2158 return MatchOperand_ParseFail; 2159 const MCConstantExpr *MCE = dyn_cast<MCConstantExpr>(ImmVal); 2160 if (!MCE) { 2161 TokError("immediate value expected for vector index"); 2162 return MatchOperand_ParseFail; 2163 } 2164 2165 SMLoc E = Parser.getTok().getLoc(); 2166 if (Parser.getTok().isNot(AsmToken::RBrac)) { 2167 Error(E, "']' expected"); 2168 return MatchOperand_ParseFail; 2169 } 2170 2171 Parser.Lex(); // Eat right bracket token. 2172 2173 Operands.push_back(ARMOperand::CreateVectorIndex(MCE->getValue(), 2174 SIdx, E, 2175 getContext())); 2176 } 2177 2178 return false; 2179} 2180 2181/// MatchCoprocessorOperandName - Try to parse an coprocessor related 2182/// instruction with a symbolic operand name. Example: "p1", "p7", "c3", 2183/// "c5", ... 2184static int MatchCoprocessorOperandName(StringRef Name, char CoprocOp) { 2185 // Use the same layout as the tablegen'erated register name matcher. Ugly, 2186 // but efficient. 
2187 switch (Name.size()) { 2188 default: break; 2189 case 2: 2190 if (Name[0] != CoprocOp) 2191 return -1; 2192 switch (Name[1]) { 2193 default: return -1; 2194 case '0': return 0; 2195 case '1': return 1; 2196 case '2': return 2; 2197 case '3': return 3; 2198 case '4': return 4; 2199 case '5': return 5; 2200 case '6': return 6; 2201 case '7': return 7; 2202 case '8': return 8; 2203 case '9': return 9; 2204 } 2205 break; 2206 case 3: 2207 if (Name[0] != CoprocOp || Name[1] != '1') 2208 return -1; 2209 switch (Name[2]) { 2210 default: return -1; 2211 case '0': return 10; 2212 case '1': return 11; 2213 case '2': return 12; 2214 case '3': return 13; 2215 case '4': return 14; 2216 case '5': return 15; 2217 } 2218 break; 2219 } 2220 2221 return -1; 2222} 2223 2224/// parseITCondCode - Try to parse a condition code for an IT instruction. 2225ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2226parseITCondCode(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2227 SMLoc S = Parser.getTok().getLoc(); 2228 const AsmToken &Tok = Parser.getTok(); 2229 if (!Tok.is(AsmToken::Identifier)) 2230 return MatchOperand_NoMatch; 2231 unsigned CC = StringSwitch<unsigned>(Tok.getString()) 2232 .Case("eq", ARMCC::EQ) 2233 .Case("ne", ARMCC::NE) 2234 .Case("hs", ARMCC::HS) 2235 .Case("cs", ARMCC::HS) 2236 .Case("lo", ARMCC::LO) 2237 .Case("cc", ARMCC::LO) 2238 .Case("mi", ARMCC::MI) 2239 .Case("pl", ARMCC::PL) 2240 .Case("vs", ARMCC::VS) 2241 .Case("vc", ARMCC::VC) 2242 .Case("hi", ARMCC::HI) 2243 .Case("ls", ARMCC::LS) 2244 .Case("ge", ARMCC::GE) 2245 .Case("lt", ARMCC::LT) 2246 .Case("gt", ARMCC::GT) 2247 .Case("le", ARMCC::LE) 2248 .Case("al", ARMCC::AL) 2249 .Default(~0U); 2250 if (CC == ~0U) 2251 return MatchOperand_NoMatch; 2252 Parser.Lex(); // Eat the token. 2253 2254 Operands.push_back(ARMOperand::CreateCondCode(ARMCC::CondCodes(CC), S)); 2255 2256 return MatchOperand_Success; 2257} 2258 2259/// parseCoprocNumOperand - Try to parse an coprocessor number operand. 
/// The token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocNumOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int Num = MatchCoprocessorOperandName(Tok.getString(), 'p');
  if (Num == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocNum(Num, S));
  return MatchOperand_Success;
}

/// parseCoprocRegOperand - Try to parse an coprocessor register operand. The
/// token must be an Identifier when called, and if it is a coprocessor
/// number, the token is eaten and the operand is added to the operand list.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocRegOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  if (Tok.isNot(AsmToken::Identifier))
    return MatchOperand_NoMatch;

  int Reg = MatchCoprocessorOperandName(Tok.getString(), 'c');
  if (Reg == -1)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateCoprocReg(Reg, S));
  return MatchOperand_Success;
}

/// parseCoprocOptionOperand - Try to parse an coprocessor option operand.
/// coproc_option : '{' imm0_255 '}'
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseCoprocOptionOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  // If this isn't a '{', this isn't a coprocessor immediate operand.
  if (Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;
  Parser.Lex(); // Eat the '{'

  const MCExpr *Expr;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().ParseExpression(Expr)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
  if (!CE || CE->getValue() < 0 || CE->getValue() > 255) {
    Error(Loc, "coprocessor option must be an immediate in range [0, 255]");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();

  // Check for and consume the closing '}'
  // NOTE(review): unlike the other failure paths here, a missing '}'
  // produces no Error() diagnostic before returning ParseFail — confirm
  // whether a "'}' expected" message is wanted.
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return MatchOperand_ParseFail;
  SMLoc E = Parser.getTok().getLoc();
  Parser.Lex(); // Eat the '}'

  Operands.push_back(ARMOperand::CreateCoprocOption(Val, S, E));
  return MatchOperand_Success;
}

// For register list parsing, we need to map from raw GPR register numbering
// to the enumeration values. The enumeration values aren't sorted by
// register number due to our using "sp", "lr" and "pc" as canonical names.
static unsigned getNextRegister(unsigned Reg) {
  // If this is a GPR, we need to do it manually, otherwise we can rely
  // on the sort ordering of the enumeration since the other reg-classes
  // are sane.
  if (!ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    return Reg + 1;
  // GPR enum order doesn't follow register-number order (SP/LR/PC), so
  // walk the architectural order explicitly; PC wraps to R0.
  switch(Reg) {
  default: assert(0 && "Invalid GPR number!");
  case ARM::R0:  return ARM::R1;  case ARM::R1:  return ARM::R2;
  case ARM::R2:  return ARM::R3;  case ARM::R3:  return ARM::R4;
  case ARM::R4:  return ARM::R5;  case ARM::R5:  return ARM::R6;
  case ARM::R6:  return ARM::R7;  case ARM::R7:  return ARM::R8;
  case ARM::R8:  return ARM::R9;  case ARM::R9:  return ARM::R10;
  case ARM::R10: return ARM::R11; case ARM::R11: return ARM::R12;
  case ARM::R12: return ARM::SP;  case ARM::SP:  return ARM::LR;
  case ARM::LR:  return ARM::PC;  case ARM::PC:  return ARM::R0;
  }
}

/// Parse a register list.  The current token must be the opening '{'.
/// Accepts comma-separated registers and ranges ("r0-r3"); all registers
/// must come from the register class of the first one, GPR lists must be
/// ascending, and VFP (DPR/SPR) lists must additionally be contiguous.
bool ARMAsmParser::
parseRegisterList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  assert(Parser.getTok().is(AsmToken::LCurly) &&
         "Token is not a Left Curly Brace");
  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // Check the first register in the list to see what register class
  // this is a list of.
  int Reg = tryParseRegister();
  if (Reg == -1)
    return Error(RegLoc, "register expected");

  const MCRegisterClass *RC;
  if (ARMMCRegisterClasses[ARM::GPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::GPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::DPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::DPRRegClassID];
  else if (ARMMCRegisterClasses[ARM::SPRRegClassID].contains(Reg))
    RC = &ARMMCRegisterClasses[ARM::SPRRegClassID];
  else
    return Error(RegLoc, "invalid register in register list");

  // The reglist instructions have at most 16 registers, so reserve
  // space for that many.
  SmallVector<std::pair<unsigned, SMLoc>, 16> Registers;
  // Store the first register.
  Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));

  // This starts immediately after the first register token in the list,
  // so we can see either a comma or a minus (range separator) as a legal
  // next token.
  while (Parser.getTok().is(AsmToken::Comma) ||
         Parser.getTok().is(AsmToken::Minus)) {
    if (Parser.getTok().is(AsmToken::Minus)) {
      Parser.Lex(); // Eat the minus.
      SMLoc EndLoc = Parser.getTok().getLoc();
      int EndReg = tryParseRegister();
      if (EndReg == -1)
        return Error(EndLoc, "register expected");
      // If the register is the same as the start reg, there's nothing
      // more to do.
      if (Reg == EndReg)
        continue;
      // The register must be in the same register class as the first.
      if (!RC->contains(EndReg))
        return Error(EndLoc, "invalid register in register list");
      // Ranges must go from low to high.
      if (getARMRegisterNumbering(Reg) > getARMRegisterNumbering(EndReg))
        return Error(EndLoc, "bad range in register list");

      // Add all the registers in the range to the register list.
      while (Reg != EndReg) {
        Reg = getNextRegister(Reg);
        Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
      }
      continue;
    }
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1)
      return Error(RegLoc, "register expected");
    // The register must be in the same register class as the first.
    if (!RC->contains(Reg))
      return Error(RegLoc, "invalid register in register list");
    // List must be monotonically increasing.
    if (getARMRegisterNumbering(Reg) <= getARMRegisterNumbering(OldReg))
      return Error(RegLoc, "register list not in ascending order");
    // VFP register lists must also be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    if (RC != &ARMMCRegisterClasses[ARM::GPRRegClassID] &&
        Reg != OldReg + 1)
      return Error(RegLoc, "non-contiguous register range");
    Registers.push_back(std::pair<unsigned, SMLoc>(Reg, RegLoc));
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly))
    return Error(E, "'}' expected");
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateRegList(Registers, S, E));
  return false;
}

// parse a vector register list
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseVectorList(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  if(Parser.getTok().isNot(AsmToken::LCurly))
    return MatchOperand_NoMatch;

  SMLoc S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat '{' token.
  SMLoc RegLoc = Parser.getTok().getLoc();

  // NOTE(review): unlike parseRegisterList above, the register class of
  // the first register is not validated here — presumably left to operand
  // matching; confirm.
  int Reg = tryParseRegister();
  if (Reg == -1) {
    Error(RegLoc, "register expected");
    return MatchOperand_ParseFail;
  }

  unsigned FirstReg = Reg;
  unsigned Count = 1;
  while (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the comma.
    RegLoc = Parser.getTok().getLoc();
    int OldReg = Reg;
    Reg = tryParseRegister();
    if (Reg == -1) {
      Error(RegLoc, "register expected");
      return MatchOperand_ParseFail;
    }
    // vector register lists must also be contiguous.
    // It's OK to use the enumeration values directly here rather, as the
    // VFP register classes have the enum sorted properly.
    if (Reg != OldReg + 1) {
      Error(RegLoc, "non-contiguous register range");
      return MatchOperand_ParseFail;
    }

    ++Count;
  }

  SMLoc E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RCurly)) {
    Error(E, "'}' expected");
    return MatchOperand_ParseFail;
  }
  Parser.Lex(); // Eat '}' token.

  Operands.push_back(ARMOperand::CreateVectorList(FirstReg, Count, S, E));
  return MatchOperand_Success;
}

/// parseMemBarrierOptOperand - Try to parse DSB/DMB data barrier options.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseMemBarrierOptOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef OptStr = Tok.getString();

  // Several pre-v7 spellings ("sh", "un", ...) are accepted as aliases for
  // the v7 barrier domain names.
  // NOTE: OptStr.slice(0, OptStr.size()) is equivalent to OptStr itself.
  unsigned Opt = StringSwitch<unsigned>(OptStr.slice(0, OptStr.size()))
    .Case("sy",    ARM_MB::SY)
    .Case("st",    ARM_MB::ST)
    .Case("sh",    ARM_MB::ISH)
    .Case("ish",   ARM_MB::ISH)
    .Case("shst",  ARM_MB::ISHST)
    .Case("ishst", ARM_MB::ISHST)
    .Case("nsh",   ARM_MB::NSH)
    .Case("un",    ARM_MB::NSH)
    .Case("nshst", ARM_MB::NSHST)
    .Case("unst",  ARM_MB::NSHST)
    .Case("osh",   ARM_MB::OSH)
    .Case("oshst", ARM_MB::OSHST)
    .Default(~0U);

  if (Opt == ~0U)
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateMemBarrierOpt((ARM_MB::MemBOpt)Opt, S));
  return MatchOperand_Success;
}

/// parseProcIFlagsOperand - Try to parse iflags from CPS instruction.
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseProcIFlagsOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();
  const AsmToken &Tok = Parser.getTok();
  assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier");
  StringRef IFlagsStr = Tok.getString();

  // An iflags string of "none" is interpreted to mean that none of the AIF
  // bits are set.  Not a terribly useful instruction, but a valid encoding.
  unsigned IFlags = 0;
  if (IFlagsStr != "none") {
    // Accept any combination of 'a', 'i' and 'f', each at most once.
    for (int i = 0, e = IFlagsStr.size(); i != e; ++i) {
      unsigned Flag = StringSwitch<unsigned>(IFlagsStr.substr(i, 1))
        .Case("a", ARM_PROC::A)
        .Case("i", ARM_PROC::I)
        .Case("f", ARM_PROC::F)
        .Default(~0U);

      // If some specific iflag is already set, it means that some letter is
      // present more than once, this is not acceptable.
      if (Flag == ~0U || (IFlags & Flag))
        return MatchOperand_NoMatch;

      IFlags |= Flag;
    }
  }

  Parser.Lex(); // Eat identifier token.
  Operands.push_back(ARMOperand::CreateProcIFlags((ARM_PROC::IFlags)IFlags, S));
  return MatchOperand_Success;
}

/// parseMSRMaskOperand - Try to parse mask flags from MSR instruction.
2557ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2558parseMSRMaskOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2559 SMLoc S = Parser.getTok().getLoc(); 2560 const AsmToken &Tok = Parser.getTok(); 2561 assert(Tok.is(AsmToken::Identifier) && "Token is not an Identifier"); 2562 StringRef Mask = Tok.getString(); 2563 2564 if (isMClass()) { 2565 // See ARMv6-M 10.1.1 2566 unsigned FlagsVal = StringSwitch<unsigned>(Mask) 2567 .Case("apsr", 0) 2568 .Case("iapsr", 1) 2569 .Case("eapsr", 2) 2570 .Case("xpsr", 3) 2571 .Case("ipsr", 5) 2572 .Case("epsr", 6) 2573 .Case("iepsr", 7) 2574 .Case("msp", 8) 2575 .Case("psp", 9) 2576 .Case("primask", 16) 2577 .Case("basepri", 17) 2578 .Case("basepri_max", 18) 2579 .Case("faultmask", 19) 2580 .Case("control", 20) 2581 .Default(~0U); 2582 2583 if (FlagsVal == ~0U) 2584 return MatchOperand_NoMatch; 2585 2586 if (!hasV7Ops() && FlagsVal >= 17 && FlagsVal <= 19) 2587 // basepri, basepri_max and faultmask only valid for V7m. 2588 return MatchOperand_NoMatch; 2589 2590 Parser.Lex(); // Eat identifier token. 
2591 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2592 return MatchOperand_Success; 2593 } 2594 2595 // Split spec_reg from flag, example: CPSR_sxf => "CPSR" and "sxf" 2596 size_t Start = 0, Next = Mask.find('_'); 2597 StringRef Flags = ""; 2598 std::string SpecReg = LowercaseString(Mask.slice(Start, Next)); 2599 if (Next != StringRef::npos) 2600 Flags = Mask.slice(Next+1, Mask.size()); 2601 2602 // FlagsVal contains the complete mask: 2603 // 3-0: Mask 2604 // 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2605 unsigned FlagsVal = 0; 2606 2607 if (SpecReg == "apsr") { 2608 FlagsVal = StringSwitch<unsigned>(Flags) 2609 .Case("nzcvq", 0x8) // same as CPSR_f 2610 .Case("g", 0x4) // same as CPSR_s 2611 .Case("nzcvqg", 0xc) // same as CPSR_fs 2612 .Default(~0U); 2613 2614 if (FlagsVal == ~0U) { 2615 if (!Flags.empty()) 2616 return MatchOperand_NoMatch; 2617 else 2618 FlagsVal = 8; // No flag 2619 } 2620 } else if (SpecReg == "cpsr" || SpecReg == "spsr") { 2621 if (Flags == "all") // cpsr_all is an alias for cpsr_fc 2622 Flags = "fc"; 2623 for (int i = 0, e = Flags.size(); i != e; ++i) { 2624 unsigned Flag = StringSwitch<unsigned>(Flags.substr(i, 1)) 2625 .Case("c", 1) 2626 .Case("x", 2) 2627 .Case("s", 4) 2628 .Case("f", 8) 2629 .Default(~0U); 2630 2631 // If some specific flag is already set, it means that some letter is 2632 // present more than once, this is not acceptable. 2633 if (FlagsVal == ~0U || (FlagsVal & Flag)) 2634 return MatchOperand_NoMatch; 2635 FlagsVal |= Flag; 2636 } 2637 } else // No match for special register. 2638 return MatchOperand_NoMatch; 2639 2640 // Special register without flags is NOT equivalent to "fc" flags. 2641 // NOTE: This is a divergence from gas' behavior. Uncommenting the following 2642 // two lines would enable gas compatibility at the expense of breaking 2643 // round-tripping. 
2644 // 2645 // if (!FlagsVal) 2646 // FlagsVal = 0x9; 2647 2648 // Bit 4: Special Reg (cpsr, apsr => 0; spsr => 1) 2649 if (SpecReg == "spsr") 2650 FlagsVal |= 16; 2651 2652 Parser.Lex(); // Eat identifier token. 2653 Operands.push_back(ARMOperand::CreateMSRMask(FlagsVal, S)); 2654 return MatchOperand_Success; 2655} 2656 2657ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2658parsePKHImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands, StringRef Op, 2659 int Low, int High) { 2660 const AsmToken &Tok = Parser.getTok(); 2661 if (Tok.isNot(AsmToken::Identifier)) { 2662 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2663 return MatchOperand_ParseFail; 2664 } 2665 StringRef ShiftName = Tok.getString(); 2666 std::string LowerOp = LowercaseString(Op); 2667 std::string UpperOp = UppercaseString(Op); 2668 if (ShiftName != LowerOp && ShiftName != UpperOp) { 2669 Error(Parser.getTok().getLoc(), Op + " operand expected."); 2670 return MatchOperand_ParseFail; 2671 } 2672 Parser.Lex(); // Eat shift type token. 2673 2674 // There must be a '#' and a shift amount. 2675 if (Parser.getTok().isNot(AsmToken::Hash)) { 2676 Error(Parser.getTok().getLoc(), "'#' expected"); 2677 return MatchOperand_ParseFail; 2678 } 2679 Parser.Lex(); // Eat hash token. 

  const MCExpr *ShiftAmount;
  SMLoc Loc = Parser.getTok().getLoc();
  if (getParser().ParseExpression(ShiftAmount)) {
    Error(Loc, "illegal expression");
    return MatchOperand_ParseFail;
  }
  const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount);
  if (!CE) {
    Error(Loc, "constant expression expected");
    return MatchOperand_ParseFail;
  }
  int Val = CE->getValue();
  // Range check against the [Low, High] bounds supplied by the caller.
  if (Val < Low || Val > High) {
    Error(Loc, "immediate value out of range");
    return MatchOperand_ParseFail;
  }

  Operands.push_back(ARMOperand::CreateImm(CE, Loc, Parser.getTok().getLoc()));

  return MatchOperand_Success;
}

/// parseSetEndImm - Parse the "be"/"le" operand of SETEND as an immediate
/// (be => 1, le => 0).
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseSetEndImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  const AsmToken &Tok = Parser.getTok();
  SMLoc S = Tok.getLoc();
  if (Tok.isNot(AsmToken::Identifier)) {
    Error(Tok.getLoc(), "'be' or 'le' operand expected");
    return MatchOperand_ParseFail;
  }
  int Val = StringSwitch<int>(Tok.getString())
    .Case("be", 1)
    .Case("le", 0)
    .Default(-1);
  Parser.Lex(); // Eat the token.

  if (Val == -1) {
    // NOTE(review): Tok is a reference to the parser's current token, and
    // the token was just eaten — Tok.getLoc() here points at the *next*
    // token, not the bad operand; S holds the original location.  Confirm
    // which location the diagnostic should use.
    Error(Tok.getLoc(), "'be' or 'le' operand expected");
    return MatchOperand_ParseFail;
  }
  Operands.push_back(ARMOperand::CreateImm(MCConstantExpr::Create(Val,
                                                                  getContext()),
                                           S, Parser.getTok().getLoc()));
  return MatchOperand_Success;
}

/// parseShifterImm - Parse the shifter immediate operand for SSAT/USAT
/// instructions. Legal values are:
///   lsl #n  'n' in [0,31]
///   asr #n  'n' in [1,32]
///     n == 32 encoded as n == 0.
2732ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2733parseShifterImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2734 const AsmToken &Tok = Parser.getTok(); 2735 SMLoc S = Tok.getLoc(); 2736 if (Tok.isNot(AsmToken::Identifier)) { 2737 Error(S, "shift operator 'asr' or 'lsl' expected"); 2738 return MatchOperand_ParseFail; 2739 } 2740 StringRef ShiftName = Tok.getString(); 2741 bool isASR; 2742 if (ShiftName == "lsl" || ShiftName == "LSL") 2743 isASR = false; 2744 else if (ShiftName == "asr" || ShiftName == "ASR") 2745 isASR = true; 2746 else { 2747 Error(S, "shift operator 'asr' or 'lsl' expected"); 2748 return MatchOperand_ParseFail; 2749 } 2750 Parser.Lex(); // Eat the operator. 2751 2752 // A '#' and a shift amount. 2753 if (Parser.getTok().isNot(AsmToken::Hash)) { 2754 Error(Parser.getTok().getLoc(), "'#' expected"); 2755 return MatchOperand_ParseFail; 2756 } 2757 Parser.Lex(); // Eat hash token. 2758 2759 const MCExpr *ShiftAmount; 2760 SMLoc E = Parser.getTok().getLoc(); 2761 if (getParser().ParseExpression(ShiftAmount)) { 2762 Error(E, "malformed shift expression"); 2763 return MatchOperand_ParseFail; 2764 } 2765 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2766 if (!CE) { 2767 Error(E, "shift amount must be an immediate"); 2768 return MatchOperand_ParseFail; 2769 } 2770 2771 int64_t Val = CE->getValue(); 2772 if (isASR) { 2773 // Shift amount must be in [1,32] 2774 if (Val < 1 || Val > 32) { 2775 Error(E, "'asr' shift amount must be in range [1,32]"); 2776 return MatchOperand_ParseFail; 2777 } 2778 // asr #32 encoded as asr #0, but is not allowed in Thumb2 mode. 
2779 if (isThumb() && Val == 32) { 2780 Error(E, "'asr #32' shift amount not allowed in Thumb mode"); 2781 return MatchOperand_ParseFail; 2782 } 2783 if (Val == 32) Val = 0; 2784 } else { 2785 // Shift amount must be in [1,32] 2786 if (Val < 0 || Val > 31) { 2787 Error(E, "'lsr' shift amount must be in range [0,31]"); 2788 return MatchOperand_ParseFail; 2789 } 2790 } 2791 2792 E = Parser.getTok().getLoc(); 2793 Operands.push_back(ARMOperand::CreateShifterImm(isASR, Val, S, E)); 2794 2795 return MatchOperand_Success; 2796} 2797 2798/// parseRotImm - Parse the shifter immediate operand for SXTB/UXTB family 2799/// of instructions. Legal values are: 2800/// ror #n 'n' in {0, 8, 16, 24} 2801ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2802parseRotImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2803 const AsmToken &Tok = Parser.getTok(); 2804 SMLoc S = Tok.getLoc(); 2805 if (Tok.isNot(AsmToken::Identifier)) 2806 return MatchOperand_NoMatch; 2807 StringRef ShiftName = Tok.getString(); 2808 if (ShiftName != "ror" && ShiftName != "ROR") 2809 return MatchOperand_NoMatch; 2810 Parser.Lex(); // Eat the operator. 2811 2812 // A '#' and a rotate amount. 2813 if (Parser.getTok().isNot(AsmToken::Hash)) { 2814 Error(Parser.getTok().getLoc(), "'#' expected"); 2815 return MatchOperand_ParseFail; 2816 } 2817 Parser.Lex(); // Eat hash token. 2818 2819 const MCExpr *ShiftAmount; 2820 SMLoc E = Parser.getTok().getLoc(); 2821 if (getParser().ParseExpression(ShiftAmount)) { 2822 Error(E, "malformed rotate expression"); 2823 return MatchOperand_ParseFail; 2824 } 2825 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ShiftAmount); 2826 if (!CE) { 2827 Error(E, "rotate amount must be an immediate"); 2828 return MatchOperand_ParseFail; 2829 } 2830 2831 int64_t Val = CE->getValue(); 2832 // Shift amount must be in {0, 8, 16, 24} (0 is undocumented extension) 2833 // normally, zero is represented in asm by omitting the rotate operand 2834 // entirely. 
2835 if (Val != 8 && Val != 16 && Val != 24 && Val != 0) { 2836 Error(E, "'ror' rotate amount must be 8, 16, or 24"); 2837 return MatchOperand_ParseFail; 2838 } 2839 2840 E = Parser.getTok().getLoc(); 2841 Operands.push_back(ARMOperand::CreateRotImm(Val, S, E)); 2842 2843 return MatchOperand_Success; 2844} 2845 2846ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2847parseBitfield(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2848 SMLoc S = Parser.getTok().getLoc(); 2849 // The bitfield descriptor is really two operands, the LSB and the width. 2850 if (Parser.getTok().isNot(AsmToken::Hash)) { 2851 Error(Parser.getTok().getLoc(), "'#' expected"); 2852 return MatchOperand_ParseFail; 2853 } 2854 Parser.Lex(); // Eat hash token. 2855 2856 const MCExpr *LSBExpr; 2857 SMLoc E = Parser.getTok().getLoc(); 2858 if (getParser().ParseExpression(LSBExpr)) { 2859 Error(E, "malformed immediate expression"); 2860 return MatchOperand_ParseFail; 2861 } 2862 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(LSBExpr); 2863 if (!CE) { 2864 Error(E, "'lsb' operand must be an immediate"); 2865 return MatchOperand_ParseFail; 2866 } 2867 2868 int64_t LSB = CE->getValue(); 2869 // The LSB must be in the range [0,31] 2870 if (LSB < 0 || LSB > 31) { 2871 Error(E, "'lsb' operand must be in the range [0,31]"); 2872 return MatchOperand_ParseFail; 2873 } 2874 E = Parser.getTok().getLoc(); 2875 2876 // Expect another immediate operand. 2877 if (Parser.getTok().isNot(AsmToken::Comma)) { 2878 Error(Parser.getTok().getLoc(), "too few operands"); 2879 return MatchOperand_ParseFail; 2880 } 2881 Parser.Lex(); // Eat hash token. 2882 if (Parser.getTok().isNot(AsmToken::Hash)) { 2883 Error(Parser.getTok().getLoc(), "'#' expected"); 2884 return MatchOperand_ParseFail; 2885 } 2886 Parser.Lex(); // Eat hash token. 
2887 2888 const MCExpr *WidthExpr; 2889 if (getParser().ParseExpression(WidthExpr)) { 2890 Error(E, "malformed immediate expression"); 2891 return MatchOperand_ParseFail; 2892 } 2893 CE = dyn_cast<MCConstantExpr>(WidthExpr); 2894 if (!CE) { 2895 Error(E, "'width' operand must be an immediate"); 2896 return MatchOperand_ParseFail; 2897 } 2898 2899 int64_t Width = CE->getValue(); 2900 // The LSB must be in the range [1,32-lsb] 2901 if (Width < 1 || Width > 32 - LSB) { 2902 Error(E, "'width' operand must be in the range [1,32-lsb]"); 2903 return MatchOperand_ParseFail; 2904 } 2905 E = Parser.getTok().getLoc(); 2906 2907 Operands.push_back(ARMOperand::CreateBitfield(LSB, Width, S, E)); 2908 2909 return MatchOperand_Success; 2910} 2911 2912ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2913parsePostIdxReg(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2914 // Check for a post-index addressing register operand. Specifically: 2915 // postidx_reg := '+' register {, shift} 2916 // | '-' register {, shift} 2917 // | register {, shift} 2918 2919 // This method must return MatchOperand_NoMatch without consuming any tokens 2920 // in the case where there is no match, as other alternatives take other 2921 // parse methods. 2922 AsmToken Tok = Parser.getTok(); 2923 SMLoc S = Tok.getLoc(); 2924 bool haveEaten = false; 2925 bool isAdd = true; 2926 int Reg = -1; 2927 if (Tok.is(AsmToken::Plus)) { 2928 Parser.Lex(); // Eat the '+' token. 2929 haveEaten = true; 2930 } else if (Tok.is(AsmToken::Minus)) { 2931 Parser.Lex(); // Eat the '-' token. 
2932 isAdd = false; 2933 haveEaten = true; 2934 } 2935 if (Parser.getTok().is(AsmToken::Identifier)) 2936 Reg = tryParseRegister(); 2937 if (Reg == -1) { 2938 if (!haveEaten) 2939 return MatchOperand_NoMatch; 2940 Error(Parser.getTok().getLoc(), "register expected"); 2941 return MatchOperand_ParseFail; 2942 } 2943 SMLoc E = Parser.getTok().getLoc(); 2944 2945 ARM_AM::ShiftOpc ShiftTy = ARM_AM::no_shift; 2946 unsigned ShiftImm = 0; 2947 if (Parser.getTok().is(AsmToken::Comma)) { 2948 Parser.Lex(); // Eat the ','. 2949 if (parseMemRegOffsetShift(ShiftTy, ShiftImm)) 2950 return MatchOperand_ParseFail; 2951 } 2952 2953 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ShiftTy, 2954 ShiftImm, S, E)); 2955 2956 return MatchOperand_Success; 2957} 2958 2959ARMAsmParser::OperandMatchResultTy ARMAsmParser:: 2960parseAM3Offset(SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 2961 // Check for a post-index addressing register operand. Specifically: 2962 // am3offset := '+' register 2963 // | '-' register 2964 // | register 2965 // | # imm 2966 // | # + imm 2967 // | # - imm 2968 2969 // This method must return MatchOperand_NoMatch without consuming any tokens 2970 // in the case where there is no match, as other alternatives take other 2971 // parse methods. 2972 AsmToken Tok = Parser.getTok(); 2973 SMLoc S = Tok.getLoc(); 2974 2975 // Do immediates first, as we always parse those if we have a '#'. 2976 if (Parser.getTok().is(AsmToken::Hash)) { 2977 Parser.Lex(); // Eat the '#'. 2978 // Explicitly look for a '-', as we need to encode negative zero 2979 // differently. 
2980 bool isNegative = Parser.getTok().is(AsmToken::Minus); 2981 const MCExpr *Offset; 2982 if (getParser().ParseExpression(Offset)) 2983 return MatchOperand_ParseFail; 2984 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset); 2985 if (!CE) { 2986 Error(S, "constant expression expected"); 2987 return MatchOperand_ParseFail; 2988 } 2989 SMLoc E = Tok.getLoc(); 2990 // Negative zero is encoded as the flag value INT32_MIN. 2991 int32_t Val = CE->getValue(); 2992 if (isNegative && Val == 0) 2993 Val = INT32_MIN; 2994 2995 Operands.push_back( 2996 ARMOperand::CreateImm(MCConstantExpr::Create(Val, getContext()), S, E)); 2997 2998 return MatchOperand_Success; 2999 } 3000 3001 3002 bool haveEaten = false; 3003 bool isAdd = true; 3004 int Reg = -1; 3005 if (Tok.is(AsmToken::Plus)) { 3006 Parser.Lex(); // Eat the '+' token. 3007 haveEaten = true; 3008 } else if (Tok.is(AsmToken::Minus)) { 3009 Parser.Lex(); // Eat the '-' token. 3010 isAdd = false; 3011 haveEaten = true; 3012 } 3013 if (Parser.getTok().is(AsmToken::Identifier)) 3014 Reg = tryParseRegister(); 3015 if (Reg == -1) { 3016 if (!haveEaten) 3017 return MatchOperand_NoMatch; 3018 Error(Parser.getTok().getLoc(), "register expected"); 3019 return MatchOperand_ParseFail; 3020 } 3021 SMLoc E = Parser.getTok().getLoc(); 3022 3023 Operands.push_back(ARMOperand::CreatePostIdxReg(Reg, isAdd, ARM_AM::no_shift, 3024 0, S, E)); 3025 3026 return MatchOperand_Success; 3027} 3028 3029/// cvtT2LdrdPre - Convert parsed operands to MCInst. 3030/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3031/// when they refer multiple MIOperands inside a single one. 3032bool ARMAsmParser:: 3033cvtT2LdrdPre(MCInst &Inst, unsigned Opcode, 3034 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3035 // Rt, Rt2 3036 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3037 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3038 // Create a writeback register dummy placeholder. 
3039 Inst.addOperand(MCOperand::CreateReg(0)); 3040 // addr 3041 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3042 // pred 3043 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3044 return true; 3045} 3046 3047/// cvtT2StrdPre - Convert parsed operands to MCInst. 3048/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3049/// when they refer multiple MIOperands inside a single one. 3050bool ARMAsmParser:: 3051cvtT2StrdPre(MCInst &Inst, unsigned Opcode, 3052 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3053 // Create a writeback register dummy placeholder. 3054 Inst.addOperand(MCOperand::CreateReg(0)); 3055 // Rt, Rt2 3056 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3057 ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1); 3058 // addr 3059 ((ARMOperand*)Operands[4])->addMemImm8s4OffsetOperands(Inst, 2); 3060 // pred 3061 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3062 return true; 3063} 3064 3065/// cvtLdWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3066/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3067/// when they refer multiple MIOperands inside a single one. 3068bool ARMAsmParser:: 3069cvtLdWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode, 3070 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 3071 ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1); 3072 3073 // Create a writeback register dummy placeholder. 3074 Inst.addOperand(MCOperand::CreateImm(0)); 3075 3076 ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2); 3077 ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2); 3078 return true; 3079} 3080 3081/// cvtStWriteBackRegT2AddrModeImm8 - Convert parsed operands to MCInst. 3082/// Needed here because the Asm Gen Matcher can't handle properly tied operands 3083/// when they refer multiple MIOperands inside a single one. 
bool ARMAsmParser::
cvtStWriteBackRegT2AddrModeImm8(MCInst &Inst, unsigned Opcode,
                        const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (t2addrmode_imm8 expands to base reg + imm8 offset = 2 MCOperands)
  ((ARMOperand*)Operands[3])->addMemImm8OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr (addrmode2 expands to 3 MCOperands)
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);

  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));

  // addr (base reg + imm12 offset = 2 MCOperands)
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}


/// cvtStWriteBackRegAddrModeImm12 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrModeImm12(MCInst &Inst, unsigned Opcode,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (base reg + imm12 offset = 2 MCOperands)
  ((ARMOperand*)Operands[3])->addMemImm12OffsetOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode2 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode2(MCInst &Inst, unsigned Opcode,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (addrmode2 expands to 3 MCOperands)
  ((ARMOperand*)Operands[3])->addAddrMode2Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (addrmode3 expands to 3 MCOperands)
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (base register only; offset is a separate post-index operand)
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (base register only)
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset (post-index register + add/sub+shift encoding = 2 MCOperands)
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackImm - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackImm(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (base register only; offset is a separate post-index operand)
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset
  ((ARMOperand*)Operands[4])->addPostIdxImm8Operands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStExtTWriteBackReg - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStExtTWriteBackReg(MCInst &Inst, unsigned Opcode,
                      const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // addr (base register only)
  ((ARMOperand*)Operands[3])->addMemNoOffsetOperands(Inst, 1);
  // offset (post-index register + add/sub+shift encoding = 2 MCOperands)
  ((ARMOperand*)Operands[4])->addPostIdxRegOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr (addrmode3 expands to 3 MCOperands)
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtStrdPre - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtStrdPre(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Rt, Rt2
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  // addr
  ((ARMOperand*)Operands[4])->addAddrMode3Operands(Inst, 3);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtLdWriteBackRegAddrMode3 - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtLdWriteBackRegAddrMode3(MCInst &Inst, unsigned Opcode,
                           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Rt
  ((ARMOperand*)Operands[2])->addRegOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // addr, then pred
  ((ARMOperand*)Operands[3])->addAddrMode3Operands(Inst, 3);
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtThumbMultiply - Convert parsed operands to MCInst.
/// Needed here because the Asm Gen Matcher can't handle properly tied operands
/// when they refer multiple MIOperands inside a single one.
bool ARMAsmParser::
cvtThumbMultiply(MCInst &Inst, unsigned Opcode,
           const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // The second source operand must be the same register as the destination
  // operand.  (Unlike the other cvt* helpers, this one can fail; it returns
  // false to signal the error.)
  if (Operands.size() == 6 &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[5])->getReg()) &&
      (((ARMOperand*)Operands[3])->getReg() !=
       ((ARMOperand*)Operands[4])->getReg())) {
    Error(Operands[3]->getStartLoc(),
          "destination register must match source register");
    return false;
  }
  // Rd, optional cc_out, Rn
  ((ARMOperand*)Operands[3])->addRegOperands(Inst, 1);
  ((ARMOperand*)Operands[1])->addCCOutOperands(Inst, 1);
  ((ARMOperand*)Operands[4])->addRegOperands(Inst, 1);
  // If we have a three-operand form, use that, else the second source operand
  // is just the destination operand again.
  if (Operands.size() == 6)
    ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  else
    Inst.addOperand(Inst.getOperand(0));
  ((ARMOperand*)Operands[2])->addCondCodeOperands(Inst, 2);

  return true;
}

/// cvtVLDwbFixed - Build a NEON VLD with fixed-increment writeback.
bool ARMAsmParser::
cvtVLDwbFixed(MCInst &Inst, unsigned Opcode,
              const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// cvtVLDwbRegister - Build a NEON VLD with register-increment writeback.
bool ARMAsmParser::
cvtVLDwbRegister(MCInst &Inst, unsigned Opcode,
                 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Vd
  ((ARMOperand*)Operands[3])->addVecListTwoDOperands(Inst, 1);
  // Create a writeback register dummy placeholder.
  Inst.addOperand(MCOperand::CreateImm(0));
  // Vn
  ((ARMOperand*)Operands[4])->addAlignedMemoryOperands(Inst, 2);
  // Vm
  ((ARMOperand*)Operands[5])->addRegOperands(Inst, 1);
  // pred
  ((ARMOperand*)Operands[1])->addCondCodeOperands(Inst, 2);
  return true;
}

/// Parse an ARM memory expression, return false if successful else return true
/// or an error. The first token must be a '[' when called.
bool ARMAsmParser::
parseMemory(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S, E;
  assert(Parser.getTok().is(AsmToken::LBrac) &&
         "Token is not a Left Bracket");
  S = Parser.getTok().getLoc();
  Parser.Lex(); // Eat left bracket token.

  const AsmToken &BaseRegTok = Parser.getTok();
  int BaseRegNum = tryParseRegister();
  if (BaseRegNum == -1)
    return Error(BaseRegTok.getLoc(), "register expected");

  // The next token must either be a comma or a closing bracket.
  const AsmToken &Tok = Parser.getTok();
  if (!Tok.is(AsmToken::Comma) && !Tok.is(AsmToken::RBrac))
    return Error(Tok.getLoc(), "malformed memory operand");

  // Case 1: "[Rn]" — base register only, no offset.
  if (Tok.is(AsmToken::RBrac)) {
    E = Tok.getLoc();
    Parser.Lex(); // Eat right bracket token.

    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0, ARM_AM::no_shift,
                                             0, 0, false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand. It's rather odd, but syntactically valid.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  assert(Tok.is(AsmToken::Comma) && "Lost comma in memory operand?!");
  Parser.Lex(); // Eat the comma.

  // Case 2: "[Rn, :align]" — if we have a ':', it's an alignment specifier.
  if (Parser.getTok().is(AsmToken::Colon)) {
    Parser.Lex(); // Eat the ':'.
    E = Parser.getTok().getLoc();

    const MCExpr *Expr;
    if (getParser().ParseExpression(Expr))
      return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr);
    if (!CE)
      return Error (E, "constant expression expected");

    // Alignment is written in bits in the assembly but stored in bytes.
    unsigned Align = 0;
    switch (CE->getValue()) {
    default:
      return Error(E, "alignment specifier must be 64, 128, or 256 bits");
    case 64:  Align = 8;  break;
    case 128: Align = 16; break;
    case 256: Align = 32; break;
    }

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, 0,
                                             ARM_AM::no_shift, 0, Align,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Case 3: "[Rn, #imm]" — if we have a '#', it's an immediate offset, else
  // assume it's a register offset.
  if (Parser.getTok().is(AsmToken::Hash)) {
    Parser.Lex(); // Eat the '#'.
    E = Parser.getTok().getLoc();

    bool isNegative = getParser().getTok().is(AsmToken::Minus);
    const MCExpr *Offset;
    if (getParser().ParseExpression(Offset))
     return true;

    // The expression has to be a constant. Memory references with relocations
    // don't come through here, as they use the <label> forms of the relevant
    // instructions.
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Offset);
    if (!CE)
      return Error (E, "constant expression expected");

    // If the constant was #-0, represent it as INT32_MIN.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      CE = MCConstantExpr::Create(INT32_MIN, getContext());

    // Now we should have the closing ']'
    E = Parser.getTok().getLoc();
    if (Parser.getTok().isNot(AsmToken::RBrac))
      return Error(E, "']' expected");
    Parser.Lex(); // Eat right bracket token.

    // Don't worry about range checking the value here. That's handled by
    // the is*() predicates.
    Operands.push_back(ARMOperand::CreateMem(BaseRegNum, CE, 0,
                                             ARM_AM::no_shift, 0, 0,
                                             false, S, E));

    // If there's a pre-indexing writeback marker, '!', just add it as a token
    // operand.
    if (Parser.getTok().is(AsmToken::Exclaim)) {
      Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
      Parser.Lex(); // Eat the '!'.
    }

    return false;
  }

  // Case 4: "[Rn, +/-Rm {, shift}]" — the register offset is optionally
  // preceded by a '+' or '-'.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex(); // Eat the '-'.
  } else if (Parser.getTok().is(AsmToken::Plus)) {
    // Nothing to do.
    Parser.Lex(); // Eat the '+'.
  }

  E = Parser.getTok().getLoc();
  int OffsetRegNum = tryParseRegister();
  if (OffsetRegNum == -1)
    return Error(E, "register expected");

  // If there's a shift operator, handle it.
  ARM_AM::ShiftOpc ShiftType = ARM_AM::no_shift;
  unsigned ShiftImm = 0;
  if (Parser.getTok().is(AsmToken::Comma)) {
    Parser.Lex(); // Eat the ','.
    if (parseMemRegOffsetShift(ShiftType, ShiftImm))
      return true;
  }

  // Now we should have the closing ']'
  E = Parser.getTok().getLoc();
  if (Parser.getTok().isNot(AsmToken::RBrac))
    return Error(E, "']' expected");
  Parser.Lex(); // Eat right bracket token.

  Operands.push_back(ARMOperand::CreateMem(BaseRegNum, 0, OffsetRegNum,
                                           ShiftType, ShiftImm, 0, isNegative,
                                           S, E));

  // If there's a pre-indexing writeback marker, '!', just add it as a token
  // operand.
  if (Parser.getTok().is(AsmToken::Exclaim)) {
    Operands.push_back(ARMOperand::CreateToken("!",Parser.getTok().getLoc()));
    Parser.Lex(); // Eat the '!'.
  }

  return false;
}

/// parseMemRegOffsetShift - one of these two:
///   ( lsl | lsr | asr | ror ) , # shift_amount
///   rrx
/// return true if it parses a shift otherwise it returns false.
3537bool ARMAsmParser::parseMemRegOffsetShift(ARM_AM::ShiftOpc &St, 3538 unsigned &Amount) { 3539 SMLoc Loc = Parser.getTok().getLoc(); 3540 const AsmToken &Tok = Parser.getTok(); 3541 if (Tok.isNot(AsmToken::Identifier)) 3542 return true; 3543 StringRef ShiftName = Tok.getString(); 3544 if (ShiftName == "lsl" || ShiftName == "LSL") 3545 St = ARM_AM::lsl; 3546 else if (ShiftName == "lsr" || ShiftName == "LSR") 3547 St = ARM_AM::lsr; 3548 else if (ShiftName == "asr" || ShiftName == "ASR") 3549 St = ARM_AM::asr; 3550 else if (ShiftName == "ror" || ShiftName == "ROR") 3551 St = ARM_AM::ror; 3552 else if (ShiftName == "rrx" || ShiftName == "RRX") 3553 St = ARM_AM::rrx; 3554 else 3555 return Error(Loc, "illegal shift operator"); 3556 Parser.Lex(); // Eat shift type token. 3557 3558 // rrx stands alone. 3559 Amount = 0; 3560 if (St != ARM_AM::rrx) { 3561 Loc = Parser.getTok().getLoc(); 3562 // A '#' and a shift amount. 3563 const AsmToken &HashTok = Parser.getTok(); 3564 if (HashTok.isNot(AsmToken::Hash)) 3565 return Error(HashTok.getLoc(), "'#' expected"); 3566 Parser.Lex(); // Eat hash token. 3567 3568 const MCExpr *Expr; 3569 if (getParser().ParseExpression(Expr)) 3570 return true; 3571 // Range check the immediate. 3572 // lsl, ror: 0 <= imm <= 31 3573 // lsr, asr: 0 <= imm <= 32 3574 const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Expr); 3575 if (!CE) 3576 return Error(Loc, "shift amount must be an immediate"); 3577 int64_t Imm = CE->getValue(); 3578 if (Imm < 0 || 3579 ((St == ARM_AM::lsl || St == ARM_AM::ror) && Imm > 31) || 3580 ((St == ARM_AM::lsr || St == ARM_AM::asr) && Imm > 32)) 3581 return Error(Loc, "immediate shift value out of range"); 3582 Amount = Imm; 3583 } 3584 3585 return false; 3586} 3587 3588/// parseFPImm - A floating point immediate expression operand. 
ARMAsmParser::OperandMatchResultTy ARMAsmParser::
parseFPImm(SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  SMLoc S = Parser.getTok().getLoc();

  if (Parser.getTok().isNot(AsmToken::Hash))
    return MatchOperand_NoMatch;

  // Disambiguate the VMOV forms that can accept an FP immediate.
  // vmov.f32 <sreg>, #imm
  // vmov.f64 <dreg>, #imm
  // vmov.f32 <dreg>, #imm  @ vector f32x2
  // vmov.f32 <qreg>, #imm  @ vector f32x4
  //
  // There are also the NEON VMOV instructions which expect an
  // integer constant. Make sure we don't try to parse an FPImm
  // for these:
  // vmov.i{8|16|32|64} <dreg|qreg>, #imm
  ARMOperand *TyOp = static_cast<ARMOperand*>(Operands[2]);
  if (!TyOp->isToken() || (TyOp->getToken() != ".f32" &&
                           TyOp->getToken() != ".f64"))
    return MatchOperand_NoMatch;

  Parser.Lex(); // Eat the '#'.

  // Handle negation, as that still comes through as a separate token.
  bool isNegative = false;
  if (Parser.getTok().is(AsmToken::Minus)) {
    isNegative = true;
    Parser.Lex();
  }
  const AsmToken &Tok = Parser.getTok();
  if (Tok.is(AsmToken::Real)) {
    // Real literal: encode as an 8-bit VFP/NEON fp immediate.
    APFloat RealVal(APFloat::IEEEdouble, Tok.getString());
    uint64_t IntVal = RealVal.bitcastToAPInt().getZExtValue();
    // If we had a '-' in front, toggle the sign bit.
    IntVal ^= (uint64_t)isNegative << 63;
    // getFP64Imm returns -1 when the value has no 8-bit encoding.
    int Val = ARM_AM::getFP64Imm(APInt(64, IntVal));
    Parser.Lex(); // Eat the token.
    if (Val == -1) {
      TokError("floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }
  if (Tok.is(AsmToken::Integer)) {
    // Integer literal: treated as the raw 8-bit encoded value.
    // NOTE(review): isNegative is ignored on this path — confirm whether
    // "#-<int>" should be rejected or sign-handled here.
    int64_t Val = Tok.getIntVal();
    Parser.Lex(); // Eat the token.
    if (Val > 255 || Val < 0) {
      TokError("encoded floating point value out of range");
      return MatchOperand_ParseFail;
    }
    Operands.push_back(ARMOperand::CreateFPImm(Val, S, getContext()));
    return MatchOperand_Success;
  }

  TokError("invalid floating point immediate");
  return MatchOperand_ParseFail;
}
/// Parse a arm instruction operand.  For now this parses the operand regardless
/// of the mnemonic.
bool ARMAsmParser::parseOperand(SmallVectorImpl<MCParsedAsmOperand*> &Operands,
                                StringRef Mnemonic) {
  SMLoc S, E;

  // Check if the current operand has a custom associated parser, if so, try to
  // custom parse the operand, or fallback to the general approach.
  OperandMatchResultTy ResTy = MatchOperandParserImpl(Operands, Mnemonic);
  if (ResTy == MatchOperand_Success)
    return false;
  // If there wasn't a custom match, try the generic matcher below. Otherwise,
  // there was a match, but an error occurred, in which case, just return that
  // the operand parsing failed.
  if (ResTy == MatchOperand_ParseFail)
    return true;

  switch (getLexer().getKind()) {
  default:
    Error(Parser.getTok().getLoc(), "unexpected token in operand");
    return true;
  case AsmToken::Identifier: {
    // If this is VMRS, check for the apsr_nzcv operand.
    if (!tryParseRegisterWithWriteBack(Operands))
      return false;
    int Res = tryParseShiftRegister(Operands);
    if (Res == 0) // success
      return false;
    else if (Res == -1) // irrecoverable error
      return true;
    if (Mnemonic == "vmrs" && Parser.getTok().getString() == "apsr_nzcv") {
      S = Parser.getTok().getLoc();
      Parser.Lex();
      Operands.push_back(ARMOperand::CreateToken("apsr_nzcv", S));
      return false;
    }

    // Fall though for the Identifier case that is not a register or a
    // special name.
  }
  case AsmToken::LParen:  // parenthesized expressions like (_strcmp-4)
  case AsmToken::Integer: // things like 1f and 2b as a branch targets
  case AsmToken::Dot: {   // . as a branch target
    // This was not a register so parse other operands that start with an
    // identifier (like labels) as expressions and create them as immediates.
    const MCExpr *IdVal;
    S = Parser.getTok().getLoc();
    if (getParser().ParseExpression(IdVal))
      return true;
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(IdVal, S, E));
    return false;
  }
  case AsmToken::LBrac:
    return parseMemory(Operands);
  case AsmToken::LCurly:
    return parseRegisterList(Operands);
  case AsmToken::Hash: {
    // #42 -> immediate.
    // TODO: ":lower16:" and ":upper16:" modifiers after # before immediate
    S = Parser.getTok().getLoc();
    Parser.Lex();
    bool isNegative = Parser.getTok().is(AsmToken::Minus);
    const MCExpr *ImmVal;
    if (getParser().ParseExpression(ImmVal))
      return true;
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(ImmVal);
    if (!CE) {
      Error(S, "constant expression expected");
      // FIXME: this function returns bool; MatchOperand_ParseFail (nonzero)
      // only works here because it converts to true. Should be 'return true;'.
      return MatchOperand_ParseFail;
    }
    // #-0 is encoded as the sentinel INT32_MIN to distinguish it from #0.
    int32_t Val = CE->getValue();
    if (isNegative && Val == 0)
      ImmVal = MCConstantExpr::Create(INT32_MIN, getContext());
    E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1);
    Operands.push_back(ARMOperand::CreateImm(ImmVal, S, E));
    return false;
  }
  case AsmToken::Colon: {
    // ":lower16:" and ":upper16:" expression prefixes
    // FIXME: Check it's an expression prefix,
    // e.g. (FOO - :lower16:BAR) isn't legal.
3730 ARMMCExpr::VariantKind RefKind; 3731 if (parsePrefix(RefKind)) 3732 return true; 3733 3734 const MCExpr *SubExprVal; 3735 if (getParser().ParseExpression(SubExprVal)) 3736 return true; 3737 3738 const MCExpr *ExprVal = ARMMCExpr::Create(RefKind, SubExprVal, 3739 getContext()); 3740 E = SMLoc::getFromPointer(Parser.getTok().getLoc().getPointer() - 1); 3741 Operands.push_back(ARMOperand::CreateImm(ExprVal, S, E)); 3742 return false; 3743 } 3744 } 3745} 3746 3747// parsePrefix - Parse ARM 16-bit relocations expression prefix, i.e. 3748// :lower16: and :upper16:. 3749bool ARMAsmParser::parsePrefix(ARMMCExpr::VariantKind &RefKind) { 3750 RefKind = ARMMCExpr::VK_ARM_None; 3751 3752 // :lower16: and :upper16: modifiers 3753 assert(getLexer().is(AsmToken::Colon) && "expected a :"); 3754 Parser.Lex(); // Eat ':' 3755 3756 if (getLexer().isNot(AsmToken::Identifier)) { 3757 Error(Parser.getTok().getLoc(), "expected prefix identifier in operand"); 3758 return true; 3759 } 3760 3761 StringRef IDVal = Parser.getTok().getIdentifier(); 3762 if (IDVal == "lower16") { 3763 RefKind = ARMMCExpr::VK_ARM_LO16; 3764 } else if (IDVal == "upper16") { 3765 RefKind = ARMMCExpr::VK_ARM_HI16; 3766 } else { 3767 Error(Parser.getTok().getLoc(), "unexpected prefix in operand"); 3768 return true; 3769 } 3770 Parser.Lex(); 3771 3772 if (getLexer().isNot(AsmToken::Colon)) { 3773 Error(Parser.getTok().getLoc(), "unexpected token after prefix"); 3774 return true; 3775 } 3776 Parser.Lex(); // Eat the last ':' 3777 return false; 3778} 3779 3780/// \brief Given a mnemonic, split out possible predication code and carry 3781/// setting letters to form a canonical mnemonic and flags. 3782// 3783// FIXME: Would be nice to autogen this. 3784// FIXME: This is a bit of a maze of special cases. 
StringRef ARMAsmParser::splitMnemonic(StringRef Mnemonic,
                                      unsigned &PredicationCode,
                                      bool &CarrySetting,
                                      unsigned &ProcessorIMod,
                                      StringRef &ITMask) {
  PredicationCode = ARMCC::AL;
  CarrySetting = false;
  ProcessorIMod = 0;

  // Ignore some mnemonics we know aren't predicated forms.
  //
  // FIXME: Would be nice to autogen this.
  if ((Mnemonic == "movs" && isThumb()) ||
      Mnemonic == "teq" || Mnemonic == "vceq" || Mnemonic == "svc" ||
      Mnemonic == "mls" || Mnemonic == "smmls" || Mnemonic == "vcls" ||
      Mnemonic == "vmls" || Mnemonic == "vnmls" || Mnemonic == "vacge" ||
      Mnemonic == "vcge" || Mnemonic == "vclt" || Mnemonic == "vacgt" ||
      Mnemonic == "vcgt" || Mnemonic == "vcle" || Mnemonic == "smlal" ||
      Mnemonic == "umaal" || Mnemonic == "umlal" || Mnemonic == "vabal" ||
      Mnemonic == "vmlal" || Mnemonic == "vpadal" || Mnemonic == "vqdmlal")
    return Mnemonic;

  // First, split out any predication code. Ignore mnemonics we know aren't
  // predicated but do have a carry-set and so weren't caught above.
  if (Mnemonic != "adcs" && Mnemonic != "bics" && Mnemonic != "movs" &&
      Mnemonic != "muls" && Mnemonic != "smlals" && Mnemonic != "smulls" &&
      Mnemonic != "umlals" && Mnemonic != "umulls" && Mnemonic != "lsls" &&
      Mnemonic != "sbcs" && Mnemonic != "rscs") {
    // Look at the trailing two characters for a condition-code suffix.
    unsigned CC = StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2))
      .Case("eq", ARMCC::EQ)
      .Case("ne", ARMCC::NE)
      .Case("hs", ARMCC::HS)
      .Case("cs", ARMCC::HS)
      .Case("lo", ARMCC::LO)
      .Case("cc", ARMCC::LO)
      .Case("mi", ARMCC::MI)
      .Case("pl", ARMCC::PL)
      .Case("vs", ARMCC::VS)
      .Case("vc", ARMCC::VC)
      .Case("hi", ARMCC::HI)
      .Case("ls", ARMCC::LS)
      .Case("ge", ARMCC::GE)
      .Case("lt", ARMCC::LT)
      .Case("gt", ARMCC::GT)
      .Case("le", ARMCC::LE)
      .Case("al", ARMCC::AL)
      .Default(~0U);
    if (CC != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 2);
      PredicationCode = CC;
    }
  }

  // Next, determine if we have a carry setting bit. We explicitly ignore all
  // the instructions we know end in 's'.
  if (Mnemonic.endswith("s") &&
      !(Mnemonic == "cps" || Mnemonic == "mls" ||
        Mnemonic == "mrs" || Mnemonic == "smmls" || Mnemonic == "vabs" ||
        Mnemonic == "vcls" || Mnemonic == "vmls" || Mnemonic == "vmrs" ||
        Mnemonic == "vnmls" || Mnemonic == "vqabs" || Mnemonic == "vrecps" ||
        Mnemonic == "vrsqrts" || Mnemonic == "srs" ||
        (Mnemonic == "movs" && isThumb()))) {
    Mnemonic = Mnemonic.slice(0, Mnemonic.size() - 1);
    CarrySetting = true;
  }

  // The "cps" instruction can have an interrupt mode operand which is glued
  // into the mnemonic. Check if this is the case, split it and parse the imod
  // op.
  if (Mnemonic.startswith("cps")) {
    // Split out any imod code.
    unsigned IMod =
      StringSwitch<unsigned>(Mnemonic.substr(Mnemonic.size()-2, 2))
      .Case("ie", ARM_PROC::IE)
      .Case("id", ARM_PROC::ID)
      .Default(~0U);
    if (IMod != ~0U) {
      Mnemonic = Mnemonic.slice(0, Mnemonic.size()-2);
      ProcessorIMod = IMod;
    }
  }

  // The "it" instruction has the condition mask on the end of the mnemonic.
  if (Mnemonic.startswith("it")) {
    ITMask = Mnemonic.slice(2, Mnemonic.size());
    Mnemonic = Mnemonic.slice(0, 2);
  }

  return Mnemonic;
}

/// \brief Given a canonical mnemonic, determine if the instruction ever allows
/// inclusion of carry set or predication code operands.
//
// FIXME: It would be nice to autogen this.
void ARMAsmParser::
getMnemonicAcceptInfo(StringRef Mnemonic, bool &CanAcceptCarrySet,
                      bool &CanAcceptPredicationCode) {
  // Mnemonics that can take an 's' suffix (carry-set / CPSR-writing forms).
  if (Mnemonic == "and" || Mnemonic == "lsl" || Mnemonic == "lsr" ||
      Mnemonic == "rrx" || Mnemonic == "ror" || Mnemonic == "sub" ||
      Mnemonic == "add" || Mnemonic == "adc" ||
      Mnemonic == "mul" || Mnemonic == "bic" || Mnemonic == "asr" ||
      Mnemonic == "orr" || Mnemonic == "mvn" ||
      Mnemonic == "rsb" || Mnemonic == "rsc" || Mnemonic == "orn" ||
      Mnemonic == "sbc" || Mnemonic == "eor" || Mnemonic == "neg" ||
      (!isThumb() && (Mnemonic == "smull" || Mnemonic == "mov" ||
                      Mnemonic == "mla" || Mnemonic == "smlal" ||
                      Mnemonic == "umlal" || Mnemonic == "umull"))) {
    CanAcceptCarrySet = true;
  } else
    CanAcceptCarrySet = false;

  // Mnemonics that are never predicable (system/hint/NEON-compare style
  // instructions, plus a few mode-dependent cases).
  if (Mnemonic == "cbnz" || Mnemonic == "setend" || Mnemonic == "dmb" ||
      Mnemonic == "cps" || Mnemonic == "mcr2" || Mnemonic == "it" ||
      Mnemonic == "mcrr2" || Mnemonic == "cbz" || Mnemonic == "cdp2" ||
      Mnemonic == "trap" || Mnemonic == "mrc2" || Mnemonic == "mrrc2" ||
      Mnemonic == "dsb" || Mnemonic == "isb" || Mnemonic == "setend" ||
      (Mnemonic == "clrex" && !isThumb()) ||
      (Mnemonic == "nop" && isThumbOne()) ||
      ((Mnemonic == "pld" || Mnemonic == "pli" || Mnemonic == "pldw" ||
        Mnemonic == "ldc2" || Mnemonic == "ldc2l" ||
        Mnemonic == "stc2" || Mnemonic == "stc2l") && !isThumb()) ||
      ((Mnemonic.startswith("rfe") || Mnemonic.startswith("srs")) &&
       !isThumb()) ||
      Mnemonic.startswith("cps") || (Mnemonic == "movs" && isThumbOne())) {
    CanAcceptPredicationCode = false;
  } else
    CanAcceptPredicationCode = true;

  // A few coprocessor/breakpoint mnemonics lose predicability in Thumb mode.
  if (isThumb()) {
    if (Mnemonic == "bkpt" || Mnemonic == "mcr" || Mnemonic == "mcrr" ||
        Mnemonic == "mrc" || Mnemonic == "mrrc" || Mnemonic == "cdp")
      CanAcceptPredicationCode = false;
  }
}

/// shouldOmitCCOutOperand - Decide, from the already-parsed operand list,
/// whether the defaulted cc_out operand (Operands[1]) should be dropped so
/// the matcher can select an encoding that has no cc_out optional-def.
bool ARMAsmParser::shouldOmitCCOutOperand(StringRef Mnemonic,
                    SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // FIXME: This is all horribly hacky. We really need a better way to deal
  // with optional operands like this in the matcher table.

  // The 'mov' mnemonic is special. One variant has a cc_out operand, while
  // another does not. Specifically, the MOVW instruction does not. So we
  // special case it here and remove the defaulted (non-setting) cc_out
  // operand if that's the instruction we're trying to match.
  //
  // We do this as post-processing of the explicit operands rather than just
  // conditionally adding the cc_out in the first place because we need
  // to check the type of the parsed immediate operand.
  if (Mnemonic == "mov" && Operands.size() > 4 && !isThumb() &&
      !static_cast<ARMOperand*>(Operands[4])->isARMSOImm() &&
      static_cast<ARMOperand*>(Operands[4])->isImm0_65535Expr() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  // Register-register 'add' for thumb does not have a cc_out operand
  // when there are only two register operands.
  if (isThumb() && Mnemonic == "add" && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;
  // Register-register 'add' for thumb does not have a cc_out operand
  // when it's an ADD Rdm, SP, {Rdm|#imm0_255} instruction. We do
  // have to check the immediate range here since Thumb2 has a variant
  // that can handle a different range and has a cc_out operand.
  if (((isThumb() && Mnemonic == "add") ||
       (isThumbTwo() && Mnemonic == "sub")) &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      (static_cast<ARMOperand*>(Operands[5])->isReg() ||
       static_cast<ARMOperand*>(Operands[5])->isImm0_1020s4()))
    return true;
  // For Thumb2, add/sub immediate does not have a cc_out operand for the
  // imm0_4095 variant. That's the least-preferred variant when
  // selecting via the generic "add" mnemonic, so to know that we
  // should remove the cc_out operand, we have to explicitly check that
  // it's not one of the other variants. Ugh.
  if (isThumbTwo() && (Mnemonic == "add" || Mnemonic == "sub") &&
      Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    // Nest conditions rather than one big 'if' statement for readability.
    //
    // If either register is a high reg, it's either one of the SP
    // variants (handled above) or a 32-bit encoding, so we just
    // check against T3.
    if ((!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
         !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg())) &&
        static_cast<ARMOperand*>(Operands[5])->isT2SOImm())
      return false;
    // If both registers are low, we're in an IT block, and the immediate is
    // in range, we should use encoding T1 instead, which has a cc_out.
    if (inITBlock() &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) &&
        isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) &&
        static_cast<ARMOperand*>(Operands[5])->isImm0_7())
      return false;

    // Otherwise, we use encoding T4, which does not have a cc_out
    // operand.
    return true;
  }

  // The thumb2 multiply instruction doesn't have a CCOut register, so
  // if we have a "mul" mnemonic in Thumb mode, check if we'll be able to
  // use the 16-bit encoding or not.
  if (isThumbTwo() && Mnemonic == "mul" && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0 &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[4])->isReg() &&
      static_cast<ARMOperand*>(Operands[5])->isReg() &&
      // If the registers aren't low regs, the destination reg isn't the
      // same as one of the source regs, or the cc_out operand is zero
      // outside of an IT block, we have to use the 32-bit encoding, so
      // remove the cc_out operand.
      (!isARMLowRegister(static_cast<ARMOperand*>(Operands[3])->getReg()) ||
       !isARMLowRegister(static_cast<ARMOperand*>(Operands[4])->getReg()) ||
       !inITBlock() ||
       (static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[5])->getReg() &&
        static_cast<ARMOperand*>(Operands[3])->getReg() !=
        static_cast<ARMOperand*>(Operands[4])->getReg())))
    return true;



  // Register-register 'add/sub' for thumb does not have a cc_out operand
  // when it's an ADD/SUB SP, #imm. Be lenient on count since there's also
  // the "add/sub SP, SP, #imm" version. If the follow-up operands aren't
  // right, this will result in better diagnostics (which operand is off)
  // anyway.
  if (isThumb() && (Mnemonic == "add" || Mnemonic == "sub") &&
      (Operands.size() == 5 || Operands.size() == 6) &&
      static_cast<ARMOperand*>(Operands[3])->isReg() &&
      static_cast<ARMOperand*>(Operands[3])->getReg() == ARM::SP &&
      static_cast<ARMOperand*>(Operands[1])->getReg() == 0)
    return true;

  return false;
}

/// Parse an arm instruction mnemonic followed by its operands.
bool ARMAsmParser::ParseInstruction(StringRef Name, SMLoc NameLoc,
                               SmallVectorImpl<MCParsedAsmOperand*> &Operands) {
  // Create the leading tokens for the mnemonic, split by '.' characters.
  size_t Start = 0, Next = Name.find('.');
  StringRef Mnemonic = Name.slice(Start, Next);

  // Split out the predication code and carry setting flag from the mnemonic.
  unsigned PredicationCode;
  unsigned ProcessorIMod;
  bool CarrySetting;
  StringRef ITMask;
  Mnemonic = splitMnemonic(Mnemonic, PredicationCode, CarrySetting,
                           ProcessorIMod, ITMask);

  // In Thumb1, only the branch (B) instruction can be predicated.
  if (isThumbOne() && PredicationCode != ARMCC::AL && Mnemonic != "b") {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "conditional execution not supported in Thumb1");
  }

  Operands.push_back(ARMOperand::CreateToken(Mnemonic, NameLoc));

  // Handle the IT instruction ITMask. Convert it to a bitmask. This
  // is the mask as it will be for the IT encoding if the conditional
  // encoding has a '1' as it's bit0 (i.e. 't' ==> '1'). In the case
  // where the conditional bit0 is zero, the instruction post-processing
  // will adjust the mask accordingly.
  if (Mnemonic == "it") {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + 2);
    if (ITMask.size() > 3) {
      Parser.EatToEndOfStatement();
      return Error(Loc, "too many conditions on IT instruction");
    }
    // Build the mask right-to-left: start with the terminating '1' bit and
    // shift it down once per condition letter, OR-ing in a new high bit for
    // each 't'.
    unsigned Mask = 8;
    for (unsigned i = ITMask.size(); i != 0; --i) {
      char pos = ITMask[i - 1];
      if (pos != 't' && pos != 'e') {
        Parser.EatToEndOfStatement();
        return Error(Loc, "illegal IT block condition mask '" + ITMask + "'");
      }
      Mask >>= 1;
      if (ITMask[i - 1] == 't')
        Mask |= 8;
    }
    Operands.push_back(ARMOperand::CreateITMask(Mask, Loc));
  }

  // FIXME: This is all a pretty gross hack. We should automatically handle
  // optional operands like this via tblgen.

  // Next, add the CCOut and ConditionCode operands, if needed.
  //
  // For mnemonics which can ever incorporate a carry setting bit or predication
  // code, our matching model involves us always generating CCOut and
  // ConditionCode operands to match the mnemonic "as written" and then we let
  // the matcher deal with finding the right instruction or generating an
  // appropriate error.
  bool CanAcceptCarrySet, CanAcceptPredicationCode;
  getMnemonicAcceptInfo(Mnemonic, CanAcceptCarrySet, CanAcceptPredicationCode);

  // If we had a carry-set on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptCarrySet && CarrySetting) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' can not set flags, but 's' suffix specified");
  }
  // If we had a predication code on an instruction that can't do that, issue an
  // error.
  if (!CanAcceptPredicationCode && PredicationCode != ARMCC::AL) {
    Parser.EatToEndOfStatement();
    return Error(NameLoc, "instruction '" + Mnemonic +
                 "' is not predicable, but condition code specified");
  }

  // Add the carry setting operand, if necessary.
  if (CanAcceptCarrySet) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size());
    Operands.push_back(ARMOperand::CreateCCOut(CarrySetting ? ARM::CPSR : 0,
                                               Loc));
  }

  // Add the predication code operand, if necessary.
  if (CanAcceptPredicationCode) {
    SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Mnemonic.size() +
                                      CarrySetting);
    Operands.push_back(ARMOperand::CreateCondCode(
                         ARMCC::CondCodes(PredicationCode), Loc));
  }

  // Add the processor imod operand, if necessary.
  if (ProcessorIMod) {
    Operands.push_back(ARMOperand::CreateImm(
          MCConstantExpr::Create(ProcessorIMod, getContext()),
                                 NameLoc, NameLoc));
  }

  // Add the remaining tokens in the mnemonic.
  while (Next != StringRef::npos) {
    Start = Next;
    Next = Name.find('.', Start + 1);
    StringRef ExtraToken = Name.slice(Start, Next);

    // For now, we're only parsing Thumb1 (for the most part), so
    // just ignore ".n" qualifiers. We'll use them to restrict
    // matching when we do Thumb2.
    if (ExtraToken != ".n") {
      SMLoc Loc = SMLoc::getFromPointer(NameLoc.getPointer() + Start);
      Operands.push_back(ARMOperand::CreateToken(ExtraToken, Loc));
    }
  }

  // Read the remaining operands.
  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    // Read the first operand.
    if (parseOperand(Operands, Mnemonic)) {
      Parser.EatToEndOfStatement();
      return true;
    }

    while (getLexer().is(AsmToken::Comma)) {
      Parser.Lex();  // Eat the comma.

      // Parse and remember the operand.
      if (parseOperand(Operands, Mnemonic)) {
        Parser.EatToEndOfStatement();
        return true;
      }
    }
  }

  if (getLexer().isNot(AsmToken::EndOfStatement)) {
    SMLoc Loc = getLexer().getLoc();
    Parser.EatToEndOfStatement();
    return Error(Loc, "unexpected token in argument list");
  }

  Parser.Lex(); // Consume the EndOfStatement

  // Some instructions, mostly Thumb, have forms for the same mnemonic that
  // do and don't have a cc_out optional-def operand. With some spot-checks
  // of the operand list, we can figure out which variant we're trying to
  // parse and adjust accordingly before actually matching. We shouldn't ever
  // try to remove a cc_out operand that was explicitly set on the
  // mnemonic, of course (CarrySetting == true). Reason number #317 the
  // table driven matcher doesn't fit well with the ARM instruction set.
  if (!CarrySetting && shouldOmitCCOutOperand(Mnemonic, Operands)) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // ARM mode 'blx' need special handling, as the register operand version
  // is predicable, but the label operand version is not. So, we can't rely
  // on the Mnemonic based checking to correctly figure out when to put
  // a k_CondCode operand in the list. If we're trying to match the label
  // version, remove the k_CondCode operand here.
  if (!isThumb() && Mnemonic == "blx" && Operands.size() == 3 &&
      static_cast<ARMOperand*>(Operands[2])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[1]);
    Operands.erase(Operands.begin() + 1);
    delete Op;
  }

  // The vector-compare-to-zero instructions have a literal token "#0" at
  // the end that comes to here as an immediate operand. Convert it to a
  // token to play nicely with the matcher.
  if ((Mnemonic == "vceq" || Mnemonic == "vcge" || Mnemonic == "vcgt" ||
       Mnemonic == "vcle" || Mnemonic == "vclt") && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // VCMP{E} does the same thing, but with a different operand count.
  if ((Mnemonic == "vcmp" || Mnemonic == "vcmpe") && Operands.size() == 5 &&
      static_cast<ARMOperand*>(Operands[4])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[4]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 4);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }
  // Similarly, the Thumb1 "RSB" instruction has a literal "#0" on the
  // end. Convert it to a token here.
  if (Mnemonic == "rsb" && isThumb() && Operands.size() == 6 &&
      static_cast<ARMOperand*>(Operands[5])->isImm()) {
    ARMOperand *Op = static_cast<ARMOperand*>(Operands[5]);
    const MCConstantExpr *CE = dyn_cast<MCConstantExpr>(Op->getImm());
    if (CE && CE->getValue() == 0) {
      Operands.erase(Operands.begin() + 5);
      Operands.push_back(ARMOperand::CreateToken("#0", Op->getStartLoc()));
      delete Op;
    }
  }

  return false;
}

// Validate context-sensitive operand constraints.

// return 'true' if register list contains non-low GPR registers,
// 'false' otherwise. If Reg is in the register list or is HiReg, set
// 'containsReg' to true.
static bool checkLowRegisterList(MCInst Inst, unsigned OpNo, unsigned Reg,
                                 unsigned HiReg, bool &containsReg) {
  containsReg = false;
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      containsReg = true;
    // Anything other than a low register isn't legal here.
    if (!isARMLowRegister(OpReg) && (!HiReg || OpReg != HiReg))
      return true;
  }
  return false;
}

// Check if the specified register is in the register list of the inst,
// starting at the indicated operand number.
static bool listContainsReg(MCInst &Inst, unsigned OpNo, unsigned Reg) {
  for (unsigned i = OpNo; i < Inst.getNumOperands(); ++i) {
    unsigned OpReg = Inst.getOperand(i).getReg();
    if (OpReg == Reg)
      return true;
  }
  return false;
}

// FIXME: We would really prefer to have MCInstrInfo (the wrapper around
// the ARMInsts array) instead. Getting that here requires awkward
// API changes, though. Better way?
4269namespace llvm { 4270extern const MCInstrDesc ARMInsts[]; 4271} 4272static const MCInstrDesc &getInstDesc(unsigned Opcode) { 4273 return ARMInsts[Opcode]; 4274} 4275 4276// FIXME: We would really like to be able to tablegen'erate this. 4277bool ARMAsmParser:: 4278validateInstruction(MCInst &Inst, 4279 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4280 const MCInstrDesc &MCID = getInstDesc(Inst.getOpcode()); 4281 SMLoc Loc = Operands[0]->getStartLoc(); 4282 // Check the IT block state first. 4283 // NOTE: In Thumb mode, the BKPT instruction has the interesting property of 4284 // being allowed in IT blocks, but not being predicable. It just always 4285 // executes. 4286 if (inITBlock() && Inst.getOpcode() != ARM::tBKPT) { 4287 unsigned bit = 1; 4288 if (ITState.FirstCond) 4289 ITState.FirstCond = false; 4290 else 4291 bit = (ITState.Mask >> (5 - ITState.CurPosition)) & 1; 4292 // The instruction must be predicable. 4293 if (!MCID.isPredicable()) 4294 return Error(Loc, "instructions in IT block must be predicable"); 4295 unsigned Cond = Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm(); 4296 unsigned ITCond = bit ? ITState.Cond : 4297 ARMCC::getOppositeCondition(ITState.Cond); 4298 if (Cond != ITCond) { 4299 // Find the condition code Operand to get its SMLoc information. 4300 SMLoc CondLoc; 4301 for (unsigned i = 1; i < Operands.size(); ++i) 4302 if (static_cast<ARMOperand*>(Operands[i])->isCondCode()) 4303 CondLoc = Operands[i]->getStartLoc(); 4304 return Error(CondLoc, "incorrect condition in IT block; got '" + 4305 StringRef(ARMCondCodeToString(ARMCC::CondCodes(Cond))) + 4306 "', but expected '" + 4307 ARMCondCodeToString(ARMCC::CondCodes(ITCond)) + "'"); 4308 } 4309 // Check for non-'al' condition codes outside of the IT block. 
4310 } else if (isThumbTwo() && MCID.isPredicable() && 4311 Inst.getOperand(MCID.findFirstPredOperandIdx()).getImm() != 4312 ARMCC::AL && Inst.getOpcode() != ARM::tB && 4313 Inst.getOpcode() != ARM::t2B) 4314 return Error(Loc, "predicated instructions must be in IT block"); 4315 4316 switch (Inst.getOpcode()) { 4317 case ARM::LDRD: 4318 case ARM::LDRD_PRE: 4319 case ARM::LDRD_POST: 4320 case ARM::LDREXD: { 4321 // Rt2 must be Rt + 1. 4322 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4323 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4324 if (Rt2 != Rt + 1) 4325 return Error(Operands[3]->getStartLoc(), 4326 "destination operands must be sequential"); 4327 return false; 4328 } 4329 case ARM::STRD: { 4330 // Rt2 must be Rt + 1. 4331 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(0).getReg()); 4332 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4333 if (Rt2 != Rt + 1) 4334 return Error(Operands[3]->getStartLoc(), 4335 "source operands must be sequential"); 4336 return false; 4337 } 4338 case ARM::STRD_PRE: 4339 case ARM::STRD_POST: 4340 case ARM::STREXD: { 4341 // Rt2 must be Rt + 1. 4342 unsigned Rt = getARMRegisterNumbering(Inst.getOperand(1).getReg()); 4343 unsigned Rt2 = getARMRegisterNumbering(Inst.getOperand(2).getReg()); 4344 if (Rt2 != Rt + 1) 4345 return Error(Operands[3]->getStartLoc(), 4346 "source operands must be sequential"); 4347 return false; 4348 } 4349 case ARM::SBFX: 4350 case ARM::UBFX: { 4351 // width must be in range [1, 32-lsb] 4352 unsigned lsb = Inst.getOperand(2).getImm(); 4353 unsigned widthm1 = Inst.getOperand(3).getImm(); 4354 if (widthm1 >= 32 - lsb) 4355 return Error(Operands[5]->getStartLoc(), 4356 "bitfield width must be in range [1,32-lsb]"); 4357 return false; 4358 } 4359 case ARM::tLDMIA: { 4360 // If we're parsing Thumb2, the .w variant is available and handles 4361 // most cases that are normally illegal for a Thumb1 LDM 4362 // instruction. 
We'll make the transformation in processInstruction() 4363 // if necessary. 4364 // 4365 // Thumb LDM instructions are writeback iff the base register is not 4366 // in the register list. 4367 unsigned Rn = Inst.getOperand(0).getReg(); 4368 bool hasWritebackToken = 4369 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4370 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4371 bool listContainsBase; 4372 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) && !isThumbTwo()) 4373 return Error(Operands[3 + hasWritebackToken]->getStartLoc(), 4374 "registers must be in range r0-r7"); 4375 // If we should have writeback, then there should be a '!' token. 4376 if (!listContainsBase && !hasWritebackToken && !isThumbTwo()) 4377 return Error(Operands[2]->getStartLoc(), 4378 "writeback operator '!' expected"); 4379 // If we should not have writeback, there must not be a '!'. This is 4380 // true even for the 32-bit wide encodings. 4381 if (listContainsBase && hasWritebackToken) 4382 return Error(Operands[3]->getStartLoc(), 4383 "writeback operator '!' not allowed when base register " 4384 "in register list"); 4385 4386 break; 4387 } 4388 case ARM::t2LDMIA_UPD: { 4389 if (listContainsReg(Inst, 3, Inst.getOperand(0).getReg())) 4390 return Error(Operands[4]->getStartLoc(), 4391 "writeback operator '!' 
not allowed when base register " 4392 "in register list"); 4393 break; 4394 } 4395 case ARM::tPOP: { 4396 bool listContainsBase; 4397 if (checkLowRegisterList(Inst, 3, 0, ARM::PC, listContainsBase)) 4398 return Error(Operands[2]->getStartLoc(), 4399 "registers must be in range r0-r7 or pc"); 4400 break; 4401 } 4402 case ARM::tPUSH: { 4403 bool listContainsBase; 4404 if (checkLowRegisterList(Inst, 3, 0, ARM::LR, listContainsBase)) 4405 return Error(Operands[2]->getStartLoc(), 4406 "registers must be in range r0-r7 or lr"); 4407 break; 4408 } 4409 case ARM::tSTMIA_UPD: { 4410 bool listContainsBase; 4411 if (checkLowRegisterList(Inst, 4, 0, 0, listContainsBase) && !isThumbTwo()) 4412 return Error(Operands[4]->getStartLoc(), 4413 "registers must be in range r0-r7"); 4414 break; 4415 } 4416 } 4417 4418 return false; 4419} 4420 4421void ARMAsmParser:: 4422processInstruction(MCInst &Inst, 4423 const SmallVectorImpl<MCParsedAsmOperand*> &Operands) { 4424 switch (Inst.getOpcode()) { 4425 case ARM::LDMIA_UPD: 4426 // If this is a load of a single register via a 'pop', then we should use 4427 // a post-indexed LDR instruction instead, per the ARM ARM. 4428 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "pop" && 4429 Inst.getNumOperands() == 5) { 4430 MCInst TmpInst; 4431 TmpInst.setOpcode(ARM::LDR_POST_IMM); 4432 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4433 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4434 TmpInst.addOperand(Inst.getOperand(1)); // Rn 4435 TmpInst.addOperand(MCOperand::CreateReg(0)); // am2offset 4436 TmpInst.addOperand(MCOperand::CreateImm(4)); 4437 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4438 TmpInst.addOperand(Inst.getOperand(3)); 4439 Inst = TmpInst; 4440 } 4441 break; 4442 case ARM::STMDB_UPD: 4443 // If this is a store of a single register via a 'push', then we should use 4444 // a pre-indexed STR instruction instead, per the ARM ARM. 
4445 if (static_cast<ARMOperand*>(Operands[0])->getToken() == "push" && 4446 Inst.getNumOperands() == 5) { 4447 MCInst TmpInst; 4448 TmpInst.setOpcode(ARM::STR_PRE_IMM); 4449 TmpInst.addOperand(Inst.getOperand(0)); // Rn_wb 4450 TmpInst.addOperand(Inst.getOperand(4)); // Rt 4451 TmpInst.addOperand(Inst.getOperand(1)); // addrmode_imm12 4452 TmpInst.addOperand(MCOperand::CreateImm(-4)); 4453 TmpInst.addOperand(Inst.getOperand(2)); // CondCode 4454 TmpInst.addOperand(Inst.getOperand(3)); 4455 Inst = TmpInst; 4456 } 4457 break; 4458 case ARM::tADDi8: 4459 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4460 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4461 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4462 // to encoding T1 if <Rd> is omitted." 4463 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4464 Inst.setOpcode(ARM::tADDi3); 4465 break; 4466 case ARM::tSUBi8: 4467 // If the immediate is in the range 0-7, we want tADDi3 iff Rd was 4468 // explicitly specified. From the ARM ARM: "Encoding T1 is preferred 4469 // to encoding T2 if <Rd> is specified and encoding T2 is preferred 4470 // to encoding T1 if <Rd> is omitted." 4471 if (Inst.getOperand(3).getImm() < 8 && Operands.size() == 6) 4472 Inst.setOpcode(ARM::tSUBi3); 4473 break; 4474 case ARM::tB: 4475 // A Thumb conditional branch outside of an IT block is a tBcc. 4476 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4477 Inst.setOpcode(ARM::tBcc); 4478 break; 4479 case ARM::t2B: 4480 // A Thumb2 conditional branch outside of an IT block is a t2Bcc. 4481 if (Inst.getOperand(1).getImm() != ARMCC::AL && !inITBlock()) 4482 Inst.setOpcode(ARM::t2Bcc); 4483 break; 4484 case ARM::t2Bcc: 4485 // If the conditional is AL or we're in an IT block, we really want t2B. 
4486 if (Inst.getOperand(1).getImm() == ARMCC::AL || inITBlock()) 4487 Inst.setOpcode(ARM::t2B); 4488 break; 4489 case ARM::tBcc: 4490 // If the conditional is AL, we really want tB. 4491 if (Inst.getOperand(1).getImm() == ARMCC::AL) 4492 Inst.setOpcode(ARM::tB); 4493 break; 4494 case ARM::tLDMIA: { 4495 // If the register list contains any high registers, or if the writeback 4496 // doesn't match what tLDMIA can do, we need to use the 32-bit encoding 4497 // instead if we're in Thumb2. Otherwise, this should have generated 4498 // an error in validateInstruction(). 4499 unsigned Rn = Inst.getOperand(0).getReg(); 4500 bool hasWritebackToken = 4501 (static_cast<ARMOperand*>(Operands[3])->isToken() && 4502 static_cast<ARMOperand*>(Operands[3])->getToken() == "!"); 4503 bool listContainsBase; 4504 if (checkLowRegisterList(Inst, 3, Rn, 0, listContainsBase) || 4505 (!listContainsBase && !hasWritebackToken) || 4506 (listContainsBase && hasWritebackToken)) { 4507 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 4508 assert (isThumbTwo()); 4509 Inst.setOpcode(hasWritebackToken ? ARM::t2LDMIA_UPD : ARM::t2LDMIA); 4510 // If we're switching to the updating version, we need to insert 4511 // the writeback tied operand. 4512 if (hasWritebackToken) 4513 Inst.insert(Inst.begin(), 4514 MCOperand::CreateReg(Inst.getOperand(0).getReg())); 4515 } 4516 break; 4517 } 4518 case ARM::tSTMIA_UPD: { 4519 // If the register list contains any high registers, we need to use 4520 // the 32-bit encoding instead if we're in Thumb2. Otherwise, this 4521 // should have generated an error in validateInstruction(). 4522 unsigned Rn = Inst.getOperand(0).getReg(); 4523 bool listContainsBase; 4524 if (checkLowRegisterList(Inst, 4, Rn, 0, listContainsBase)) { 4525 // 16-bit encoding isn't sufficient. Switch to the 32-bit version. 
4526 assert (isThumbTwo()); 4527 Inst.setOpcode(ARM::t2STMIA_UPD); 4528 } 4529 break; 4530 } 4531 case ARM::t2MOVi: { 4532 // If we can use the 16-bit encoding and the user didn't explicitly 4533 // request the 32-bit variant, transform it here. 4534 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4535 Inst.getOperand(1).getImm() <= 255 && 4536 ((!inITBlock() && Inst.getOperand(2).getImm() == ARMCC::AL && 4537 Inst.getOperand(4).getReg() == ARM::CPSR) || 4538 (inITBlock() && Inst.getOperand(4).getReg() == 0)) && 4539 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4540 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4541 // The operands aren't in the same order for tMOVi8... 4542 MCInst TmpInst; 4543 TmpInst.setOpcode(ARM::tMOVi8); 4544 TmpInst.addOperand(Inst.getOperand(0)); 4545 TmpInst.addOperand(Inst.getOperand(4)); 4546 TmpInst.addOperand(Inst.getOperand(1)); 4547 TmpInst.addOperand(Inst.getOperand(2)); 4548 TmpInst.addOperand(Inst.getOperand(3)); 4549 Inst = TmpInst; 4550 } 4551 break; 4552 } 4553 case ARM::t2MOVr: { 4554 // If we can use the 16-bit encoding and the user didn't explicitly 4555 // request the 32-bit variant, transform it here. 4556 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4557 isARMLowRegister(Inst.getOperand(1).getReg()) && 4558 Inst.getOperand(2).getImm() == ARMCC::AL && 4559 Inst.getOperand(4).getReg() == ARM::CPSR && 4560 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4561 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4562 // The operands aren't the same for tMOV[S]r... (no cc_out) 4563 MCInst TmpInst; 4564 TmpInst.setOpcode(Inst.getOperand(4).getReg() ? 
ARM::tMOVSr : ARM::tMOVr); 4565 TmpInst.addOperand(Inst.getOperand(0)); 4566 TmpInst.addOperand(Inst.getOperand(1)); 4567 TmpInst.addOperand(Inst.getOperand(2)); 4568 TmpInst.addOperand(Inst.getOperand(3)); 4569 Inst = TmpInst; 4570 } 4571 break; 4572 } 4573 case ARM::t2SXTH: 4574 case ARM::t2SXTB: 4575 case ARM::t2UXTH: 4576 case ARM::t2UXTB: { 4577 // If we can use the 16-bit encoding and the user didn't explicitly 4578 // request the 32-bit variant, transform it here. 4579 if (isARMLowRegister(Inst.getOperand(0).getReg()) && 4580 isARMLowRegister(Inst.getOperand(1).getReg()) && 4581 Inst.getOperand(2).getImm() == 0 && 4582 (!static_cast<ARMOperand*>(Operands[2])->isToken() || 4583 static_cast<ARMOperand*>(Operands[2])->getToken() != ".w")) { 4584 unsigned NewOpc; 4585 switch (Inst.getOpcode()) { 4586 default: llvm_unreachable("Illegal opcode!"); 4587 case ARM::t2SXTH: NewOpc = ARM::tSXTH; break; 4588 case ARM::t2SXTB: NewOpc = ARM::tSXTB; break; 4589 case ARM::t2UXTH: NewOpc = ARM::tUXTH; break; 4590 case ARM::t2UXTB: NewOpc = ARM::tUXTB; break; 4591 } 4592 // The operands aren't the same for thumb1 (no rotate operand). 4593 MCInst TmpInst; 4594 TmpInst.setOpcode(NewOpc); 4595 TmpInst.addOperand(Inst.getOperand(0)); 4596 TmpInst.addOperand(Inst.getOperand(1)); 4597 TmpInst.addOperand(Inst.getOperand(3)); 4598 TmpInst.addOperand(Inst.getOperand(4)); 4599 Inst = TmpInst; 4600 } 4601 break; 4602 } 4603 case ARM::t2IT: { 4604 // The mask bits for all but the first condition are represented as 4605 // the low bit of the condition code value implies 't'. We currently 4606 // always have 1 implies 't', so XOR toggle the bits if the low bit 4607 // of the condition code is zero. 
The encoding also expects the low 4608 // bit of the condition to be encoded as bit 4 of the mask operand, 4609 // so mask that in if needed 4610 MCOperand &MO = Inst.getOperand(1); 4611 unsigned Mask = MO.getImm(); 4612 unsigned OrigMask = Mask; 4613 unsigned TZ = CountTrailingZeros_32(Mask); 4614 if ((Inst.getOperand(0).getImm() & 1) == 0) { 4615 assert(Mask && TZ <= 3 && "illegal IT mask value!"); 4616 for (unsigned i = 3; i != TZ; --i) 4617 Mask ^= 1 << i; 4618 } else 4619 Mask |= 0x10; 4620 MO.setImm(Mask); 4621 4622 // Set up the IT block state according to the IT instruction we just 4623 // matched. 4624 assert(!inITBlock() && "nested IT blocks?!"); 4625 ITState.Cond = ARMCC::CondCodes(Inst.getOperand(0).getImm()); 4626 ITState.Mask = OrigMask; // Use the original mask, not the updated one. 4627 ITState.CurPosition = 0; 4628 ITState.FirstCond = true; 4629 break; 4630 } 4631 } 4632} 4633 4634unsigned ARMAsmParser::checkTargetMatchPredicate(MCInst &Inst) { 4635 // 16-bit thumb arithmetic instructions either require or preclude the 'S' 4636 // suffix depending on whether they're in an IT block or not. 4637 unsigned Opc = Inst.getOpcode(); 4638 const MCInstrDesc &MCID = getInstDesc(Opc); 4639 if (MCID.TSFlags & ARMII::ThumbArithFlagSetting) { 4640 assert(MCID.hasOptionalDef() && 4641 "optionally flag setting instruction missing optional def operand"); 4642 assert(MCID.NumOperands == Inst.getNumOperands() && 4643 "operand count mismatch!"); 4644 // Find the optional-def operand (cc_out). 4645 unsigned OpNo; 4646 for (OpNo = 0; 4647 !MCID.OpInfo[OpNo].isOptionalDef() && OpNo < MCID.NumOperands; 4648 ++OpNo) 4649 ; 4650 // If we're parsing Thumb1, reject it completely. 4651 if (isThumbOne() && Inst.getOperand(OpNo).getReg() != ARM::CPSR) 4652 return Match_MnemonicFail; 4653 // If we're parsing Thumb2, which form is legal depends on whether we're 4654 // in an IT block. 
4655 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() != ARM::CPSR && 4656 !inITBlock()) 4657 return Match_RequiresITBlock; 4658 if (isThumbTwo() && Inst.getOperand(OpNo).getReg() == ARM::CPSR && 4659 inITBlock()) 4660 return Match_RequiresNotITBlock; 4661 } 4662 // Some high-register supporting Thumb1 encodings only allow both registers 4663 // to be from r0-r7 when in Thumb2. 4664 else if (Opc == ARM::tADDhirr && isThumbOne() && 4665 isARMLowRegister(Inst.getOperand(1).getReg()) && 4666 isARMLowRegister(Inst.getOperand(2).getReg())) 4667 return Match_RequiresThumb2; 4668 // Others only require ARMv6 or later. 4669 else if (Opc == ARM::tMOVr && isThumbOne() && !hasV6Ops() && 4670 isARMLowRegister(Inst.getOperand(0).getReg()) && 4671 isARMLowRegister(Inst.getOperand(1).getReg())) 4672 return Match_RequiresV6; 4673 return Match_Success; 4674} 4675 4676bool ARMAsmParser:: 4677MatchAndEmitInstruction(SMLoc IDLoc, 4678 SmallVectorImpl<MCParsedAsmOperand*> &Operands, 4679 MCStreamer &Out) { 4680 MCInst Inst; 4681 unsigned ErrorInfo; 4682 unsigned MatchResult; 4683 MatchResult = MatchInstructionImpl(Operands, Inst, ErrorInfo); 4684 switch (MatchResult) { 4685 default: break; 4686 case Match_Success: 4687 // Context sensitive operand constraints aren't handled by the matcher, 4688 // so check them here. 4689 if (validateInstruction(Inst, Operands)) { 4690 // Still progress the IT block, otherwise one wrong condition causes 4691 // nasty cascading errors. 4692 forwardITPosition(); 4693 return true; 4694 } 4695 4696 // Some instructions need post-processing to, for example, tweak which 4697 // encoding is selected. 4698 processInstruction(Inst, Operands); 4699 4700 // Only move forward at the very end so that everything in validate 4701 // and process gets a consistent answer about whether we're in an IT 4702 // block. 
4703 forwardITPosition(); 4704 4705 Out.EmitInstruction(Inst); 4706 return false; 4707 case Match_MissingFeature: 4708 Error(IDLoc, "instruction requires a CPU feature not currently enabled"); 4709 return true; 4710 case Match_InvalidOperand: { 4711 SMLoc ErrorLoc = IDLoc; 4712 if (ErrorInfo != ~0U) { 4713 if (ErrorInfo >= Operands.size()) 4714 return Error(IDLoc, "too few operands for instruction"); 4715 4716 ErrorLoc = ((ARMOperand*)Operands[ErrorInfo])->getStartLoc(); 4717 if (ErrorLoc == SMLoc()) ErrorLoc = IDLoc; 4718 } 4719 4720 return Error(ErrorLoc, "invalid operand for instruction"); 4721 } 4722 case Match_MnemonicFail: 4723 return Error(IDLoc, "invalid instruction"); 4724 case Match_ConversionFail: 4725 // The converter function will have already emited a diagnostic. 4726 return true; 4727 case Match_RequiresNotITBlock: 4728 return Error(IDLoc, "flag setting instruction only valid outside IT block"); 4729 case Match_RequiresITBlock: 4730 return Error(IDLoc, "instruction only valid inside IT block"); 4731 case Match_RequiresV6: 4732 return Error(IDLoc, "instruction variant requires ARMv6 or later"); 4733 case Match_RequiresThumb2: 4734 return Error(IDLoc, "instruction variant requires Thumb2"); 4735 } 4736 4737 llvm_unreachable("Implement any new match types added!"); 4738 return true; 4739} 4740 4741/// parseDirective parses the arm specific directives 4742bool ARMAsmParser::ParseDirective(AsmToken DirectiveID) { 4743 StringRef IDVal = DirectiveID.getIdentifier(); 4744 if (IDVal == ".word") 4745 return parseDirectiveWord(4, DirectiveID.getLoc()); 4746 else if (IDVal == ".thumb") 4747 return parseDirectiveThumb(DirectiveID.getLoc()); 4748 else if (IDVal == ".thumb_func") 4749 return parseDirectiveThumbFunc(DirectiveID.getLoc()); 4750 else if (IDVal == ".code") 4751 return parseDirectiveCode(DirectiveID.getLoc()); 4752 else if (IDVal == ".syntax") 4753 return parseDirectiveSyntax(DirectiveID.getLoc()); 4754 return true; 4755} 4756 4757/// parseDirectiveWord 
4758/// ::= .word [ expression (, expression)* ] 4759bool ARMAsmParser::parseDirectiveWord(unsigned Size, SMLoc L) { 4760 if (getLexer().isNot(AsmToken::EndOfStatement)) { 4761 for (;;) { 4762 const MCExpr *Value; 4763 if (getParser().ParseExpression(Value)) 4764 return true; 4765 4766 getParser().getStreamer().EmitValue(Value, Size, 0/*addrspace*/); 4767 4768 if (getLexer().is(AsmToken::EndOfStatement)) 4769 break; 4770 4771 // FIXME: Improve diagnostic. 4772 if (getLexer().isNot(AsmToken::Comma)) 4773 return Error(L, "unexpected token in directive"); 4774 Parser.Lex(); 4775 } 4776 } 4777 4778 Parser.Lex(); 4779 return false; 4780} 4781 4782/// parseDirectiveThumb 4783/// ::= .thumb 4784bool ARMAsmParser::parseDirectiveThumb(SMLoc L) { 4785 if (getLexer().isNot(AsmToken::EndOfStatement)) 4786 return Error(L, "unexpected token in directive"); 4787 Parser.Lex(); 4788 4789 // TODO: set thumb mode 4790 // TODO: tell the MC streamer the mode 4791 // getParser().getStreamer().Emit???(); 4792 return false; 4793} 4794 4795/// parseDirectiveThumbFunc 4796/// ::= .thumbfunc symbol_name 4797bool ARMAsmParser::parseDirectiveThumbFunc(SMLoc L) { 4798 const MCAsmInfo &MAI = getParser().getStreamer().getContext().getAsmInfo(); 4799 bool isMachO = MAI.hasSubsectionsViaSymbols(); 4800 StringRef Name; 4801 4802 // Darwin asm has function name after .thumb_func direction 4803 // ELF doesn't 4804 if (isMachO) { 4805 const AsmToken &Tok = Parser.getTok(); 4806 if (Tok.isNot(AsmToken::Identifier) && Tok.isNot(AsmToken::String)) 4807 return Error(L, "unexpected token in .thumb_func directive"); 4808 Name = Tok.getString(); 4809 Parser.Lex(); // Consume the identifier token. 
4810 } 4811 4812 if (getLexer().isNot(AsmToken::EndOfStatement)) 4813 return Error(L, "unexpected token in directive"); 4814 Parser.Lex(); 4815 4816 // FIXME: assuming function name will be the line following .thumb_func 4817 if (!isMachO) { 4818 Name = Parser.getTok().getString(); 4819 } 4820 4821 // Mark symbol as a thumb symbol. 4822 MCSymbol *Func = getParser().getContext().GetOrCreateSymbol(Name); 4823 getParser().getStreamer().EmitThumbFunc(Func); 4824 return false; 4825} 4826 4827/// parseDirectiveSyntax 4828/// ::= .syntax unified | divided 4829bool ARMAsmParser::parseDirectiveSyntax(SMLoc L) { 4830 const AsmToken &Tok = Parser.getTok(); 4831 if (Tok.isNot(AsmToken::Identifier)) 4832 return Error(L, "unexpected token in .syntax directive"); 4833 StringRef Mode = Tok.getString(); 4834 if (Mode == "unified" || Mode == "UNIFIED") 4835 Parser.Lex(); 4836 else if (Mode == "divided" || Mode == "DIVIDED") 4837 return Error(L, "'.syntax divided' arm asssembly not supported"); 4838 else 4839 return Error(L, "unrecognized syntax mode in .syntax directive"); 4840 4841 if (getLexer().isNot(AsmToken::EndOfStatement)) 4842 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4843 Parser.Lex(); 4844 4845 // TODO tell the MC streamer the mode 4846 // getParser().getStreamer().Emit???(); 4847 return false; 4848} 4849 4850/// parseDirectiveCode 4851/// ::= .code 16 | 32 4852bool ARMAsmParser::parseDirectiveCode(SMLoc L) { 4853 const AsmToken &Tok = Parser.getTok(); 4854 if (Tok.isNot(AsmToken::Integer)) 4855 return Error(L, "unexpected token in .code directive"); 4856 int64_t Val = Parser.getTok().getIntVal(); 4857 if (Val == 16) 4858 Parser.Lex(); 4859 else if (Val == 32) 4860 Parser.Lex(); 4861 else 4862 return Error(L, "invalid operand to .code directive"); 4863 4864 if (getLexer().isNot(AsmToken::EndOfStatement)) 4865 return Error(Parser.getTok().getLoc(), "unexpected token in directive"); 4866 Parser.Lex(); 4867 4868 if (Val == 16) { 4869 if 
(!isThumb()) 4870 SwitchMode(); 4871 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code16); 4872 } else { 4873 if (isThumb()) 4874 SwitchMode(); 4875 getParser().getStreamer().EmitAssemblerFlag(MCAF_Code32); 4876 } 4877 4878 return false; 4879} 4880 4881extern "C" void LLVMInitializeARMAsmLexer(); 4882 4883/// Force static initialization. 4884extern "C" void LLVMInitializeARMAsmParser() { 4885 RegisterMCAsmParser<ARMAsmParser> X(TheARMTarget); 4886 RegisterMCAsmParser<ARMAsmParser> Y(TheThumbTarget); 4887 LLVMInitializeARMAsmLexer(); 4888} 4889 4890#define GET_REGISTER_MATCHER 4891#define GET_MATCHER_IMPLEMENTATION 4892#include "ARMGenAsmMatcher.inc" 4893